@@ -4,22 +4,27 @@ import UIKit
#endif

final class RunLoopObserver {
+
+    static let SentryANRMechanismDataAppHangDuration = "app_hang_duration"

    private let dateProvider: SentryCurrentDateProvider
    private let threadInspector: ThreadInspector
    private let debugImageCache: DebugImageCache
    private let fileManager: SentryFileManager
+    private let crashWrapper: CrashWrapper

    init(
        dateProvider: SentryCurrentDateProvider,
        threadInspector: ThreadInspector,
        debugImageCache: DebugImageCache,
        fileManager: SentryFileManager,
+        crashWrapper: CrashWrapper,
        minHangTime: TimeInterval) {
        self.dateProvider = dateProvider
        self.threadInspector = threadInspector
        self.debugImageCache = debugImageCache
        self.fileManager = fileManager
+        self.crashWrapper = crashWrapper
        self.lastFrameTime = 0
        self.minHangTime = minHangTime
#if canImport(UIKit) && !SENTRY_NO_UIKIT
@@ -35,7 +40,7 @@ final class RunLoopObserver {
#endif
        expectedFrameDuration = 1.0 / maxFPS
        thresholdForFrameStacktrace = expectedFrameDuration * 0.5
-        // TODO: Check for stored app hang
+        captureStoredAppHang()
    }

    // This queue is used to detect main thread hangs, they need to be detected on a background thread
@@ -131,6 +136,43 @@ final class RunLoopObserver {
        return currentTime
    }

+    func captureStoredAppHang() {
+        DispatchQueue.global(qos: .background).async { [weak self] in
+            guard let self, let event = fileManager.readAppHangEvent() else { return }
+
+            fileManager.deleteAppHangEvent()
+            if crashWrapper.crashedLastLaunch {
+                // The app crashed during an ongoing app hang. Capture the stored app hang as it is.
+                // We already applied the scope. We use an empty scope to avoid overwriting existing
+                // fields on the event.
+                SentrySDK.capture(event: event, scope: Scope())
+            } else {
+                // Fatal App Hang
+                // We can't tell whether the watchdog or the user terminated the app, because when the main
+                // thread is blocked we don't receive the applicationWillTerminate notification. Further
+                // investigation is required to check whether we can somehow distinguish between watchdog
+                // and user terminations; see https://github.com/getsentry/sentry-cocoa/issues/4845.
+                guard let exceptions = event.exceptions, let exception = exceptions.first, exceptions.count == 1 else {
+                    SentrySDKLog.warning("The stored app hang event is expected to have exactly one exception, so we don't capture it.")
+                    return
+                }
+
+                SentryLevelBridge.setBreadcrumbLevelOn(event, level: SentryLevel.fatal.rawValue)
+                event.exceptions?.first?.mechanism?.handled = false
+                let fatalExceptionType = SentryAppHangTypeMapper.getFatalExceptionType(nonFatalErrorType: exception.type)
+                event.exceptions?.first?.type = fatalExceptionType
+
+                var mechanismData = exception.mechanism?.data
+                let durationInfo = mechanismData?[Self.SentryANRMechanismDataAppHangDuration] as? String ?? "over \(minHangTime) seconds"
+                mechanismData?.removeValue(forKey: Self.SentryANRMechanismDataAppHangDuration)
+                event.exceptions?.first?.value = "The user or the OS watchdog terminated your app while it blocked the main thread for \(durationInfo)"
+                event.exceptions?.first?.mechanism?.data = mechanismData
+                SentryDependencyContainerSwiftHelper.captureFatalAppHang(event)
+
+            }
+        }
+    }
+
    // MARK: Background queue

    private var blockingDuration: TimeInterval?
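To make the branching in captureStoredAppHang easier to follow, here is a reduced, self-contained sketch outside the diff. StoredHang, HangOutcome, and outcome(for:crashedLastLaunch:) are hypothetical names used only for illustration; the real code operates on the stored Sentry event directly.

import Foundation

// Reduced model of the decision above: a crash during an ongoing hang sends the
// stored event untouched; otherwise the event is upgraded to a fatal app hang,
// but only when it carries exactly one exception.
struct StoredHang {
    let exceptionCount: Int
}

enum HangOutcome {
    case captureAsIs     // crashed during the hang: send the stored event as it is
    case upgradeToFatal  // no crash report: the app was terminated while hanging
    case dropWithWarning // unexpected event shape: log and skip
}

func outcome(for hang: StoredHang, crashedLastLaunch: Bool) -> HangOutcome {
    if crashedLastLaunch {
        return .captureAsIs
    }
    guard hang.exceptionCount == 1 else {
        return .dropWithWarning
    }
    return .upgradeToFatal
}

// A stored hang with exactly one exception and no crash report becomes a fatal app hang.
print(outcome(for: StoredHang(exceptionCount: 1), crashedLastLaunch: false))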
@@ -151,20 +193,7 @@ final class RunLoopObserver {
            break
        }
    }
-
-    // TODO: Only write hang if it's long enough
-    // TODO: Need to clear hang details after the hang ends
-    // Problem: If we are detecting a multiple runloop hang, which then turns into a single long hang
-    // we might want to add the total time of that long hang to what is on disk from the multiple runloop hang
-    // Or we could not do that and just say we only overwrite what is on disk if the hang exceeds the time
-    // of the multiple runloop hang.
-    // Could have two paths, fullyBlocking only used when the semaphore times out, we keep tracking in memory until
-    // it exceeds the threshold then we write to disk.
-    // Non fully blocking only writes when the runloop finishes if it exceeds the threshold.
-    // Sampled stacktrace should be kept separate from time, because time for nonFullyBlocking is kep on main thread
-    // time for fullyBlocking is kept on background thread
-
-    // TODO: Not using should sample
+
    private func continueHang(started: TimeInterval, isStarting: Bool) {
        dispatchPrecondition(condition: .onQueue(queue))

@@ -186,11 +215,15 @@ final class RunLoopObserver {

    // Safe to call from any thread
    private func makeEvent(duration: TimeInterval, threads: [SentryThread], type: SentryANRType) -> Event {
-        var event = Event()
+        let event = Event()
        SentryLevelBridge.setBreadcrumbLevelOn(event, level: SentryLevel.error.rawValue)
        let exceptionType = SentryAppHangTypeMapper.getExceptionType(anrType: type)
        let exception = Exception(value: String(format: "App hanging for %.3f seconds.", duration), type: exceptionType)
        let mechanism = Mechanism(type: "AppHang")
+        // We only temporarily store the app hang duration info, so we can change the error message
+        // when sending either a normal or a fatal app hang event. Otherwise, we would have to rely on
+        // string parsing to retrieve the app hang duration info from the error message.
+        mechanism.data = [Self.SentryANRMechanismDataAppHangDuration: "\(duration) seconds"]
        exception.mechanism = mechanism
        exception.stacktrace = threads[0].stacktrace
        exception.stacktrace?.snapshot = true
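The mechanism-data comment above is easiest to read as a round trip. Below is a plain Swift sketch, not part of the diff and with made-up values: it writes the duration under the app_hang_duration key the way makeEvent does, then reads it back with the same fallback the fatal path in captureStoredAppHang uses.

import Foundation

// Write side: the hang duration is stored as a string under the mechanism-data key.
let appHangDurationKey = "app_hang_duration"
let minHangTime: TimeInterval = 2
let duration: TimeInterval = 3.218
var mechanismData: [String: Any]? = [appHangDurationKey: "\(duration) seconds"]

// Read side: the fatal path pulls the duration back out, falling back to a coarse
// "over <minHangTime> seconds" message when the key is missing, and removes the
// temporary key before the event is sent.
let durationInfo = mechanismData?[appHangDurationKey] as? String ?? "over \(minHangTime) seconds"
mechanismData?.removeValue(forKey: appHangDurationKey)
print("The user or the OS watchdog terminated your app while it blocked the main thread for \(durationInfo)")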
@@ -212,12 +245,14 @@
        dateProvider: SentryCurrentDateProvider,
        threadInspector: ThreadInspector,
        debugImageCache: DebugImageCache,
-        fileManager: SentryFileManager) {
+        fileManager: SentryFileManager,
+        crashWrapper: CrashWrapper) {
        observer = RunLoopObserver(
            dateProvider: dateProvider,
            threadInspector: threadInspector,
            debugImageCache: debugImageCache,
            fileManager: fileManager,
+            crashWrapper: crashWrapper,
            minHangTime: 2)
    }

@@ -233,3 +268,7 @@ final class RunLoopObserver {
@objc @_spi(Private) public protocol DebugImageCache {
    func getDebugImagesFromCacheFor(threads: [SentryThread]?) -> [DebugMeta]
}
+
+@objc @_spi(Private) public protocol CrashWrapper {
+    var crashedLastLaunch: Bool { get }
+}
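For context, a minimal sketch of a type conforming to the new CrashWrapper protocol, for example as a test double. CrashWrapperStub is a hypothetical name; inside the SDK the conformance would come from the existing crash reporter wrapper, and callers in another module would need the matching @_spi(Private) import.

import Foundation

// Hypothetical test double; the protocol only requires `crashedLastLaunch`.
// Conforming to an @objc protocol requires an Objective-C compatible class.
final class CrashWrapperStub: NSObject, CrashWrapper {
    let crashedLastLaunch: Bool

    init(crashedLastLaunch: Bool) {
        self.crashedLastLaunch = crashedLastLaunch
        super.init()
    }
}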