import org.opensearch.OpenSearchException;
import org.opensearch.search.profile.AbstractProfileBreakdown;
import org.opensearch.search.profile.ContextualProfileBreakdown;
+import org.opensearch.search.profile.Timer;

import java.util.ArrayList;
import java.util.Collections;
@@ -42,6 +43,8 @@ public final class ConcurrentQueryProfileBreakdown extends ContextualProfileBreakdown
    // keep track of all breakdown timings per segment. package-private for testing
    private final Map<Object, AbstractProfileBreakdown<QueryTimingType>> contexts = new ConcurrentHashMap<>();

+    private final Map<Long, Timer> threadToRewriteTimer = new ConcurrentHashMap<>();
+
    // represents slice to leaves mapping as for each slice a unique collector instance is created
    private final Map<Collector, List<LeafReaderContext>> sliceCollectorsToLeaves = new ConcurrentHashMap<>();
@@ -50,6 +53,10 @@ public ConcurrentQueryProfileBreakdown() {
        super(QueryTimingType.class);
    }

+    public Map<Long, Timer> getThreadToRewriteTimer() {
+        return threadToRewriteTimer;
+    }
+
    @Override
    public AbstractProfileBreakdown<QueryTimingType> context(Object context) {
        // See please https://bugs.openjdk.java.net/browse/JDK-8161372
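
The diff does not show where threadToRewriteTimer is populated. A minimal sketch of how a rewriting thread could record its timing through the new getter, assuming Timer exposes the start()/stop() pair used elsewhere in the profile package and using a hypothetical profileRewrite helper that is not part of this change:

    // Hypothetical helper, not in this diff: times one rewrite pass under the
    // calling thread's Timer so the breakdown can aggregate it per thread later.
    static void profileRewrite(ConcurrentQueryProfileBreakdown breakdown, Runnable rewriteTask) {
        long threadId = Thread.currentThread().getId();
        Timer rewriteTimer = breakdown.getThreadToRewriteTimer().computeIfAbsent(threadId, id -> new Timer());
        rewriteTimer.start(); // assumes Timer.start()/stop(), as used by other profile breakdowns
        try {
            rewriteTask.run();
        } finally {
            rewriteTimer.stop();
        }
    }

Keying the map by thread id means each concurrent slice accumulates its own rewrite time and count, which is what the REWRITE aggregation below iterates over.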
@@ -93,11 +100,11 @@ public Map<String, Long> toBreakdownMap() {
     */
    private Map<String, Long> buildDefaultQueryBreakdownMap(long createWeightTime) {
        final Map<String, Long> concurrentQueryBreakdownMap = new HashMap<>();
-        for (QueryTimingType timingType : QueryTimingType.values()) {
+        for (ConcurrentQueryTimingType timingType : ConcurrentQueryTimingType.values()) {
            final String timingTypeKey = timingType.toString();
            final String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX;

-            if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) {
+            if (timingType.equals(ConcurrentQueryTimingType.CREATE_WEIGHT)) {
                concurrentQueryBreakdownMap.put(timingTypeKey, createWeightTime);
                concurrentQueryBreakdownMap.put(timingTypeCountKey, 1L);
                continue;
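
ConcurrentQueryTimingType itself is not defined in this diff; the loops above assume an enum that mirrors the existing QueryTimingType constants and adds a REWRITE entry so rewrite timing shows up in the concurrent breakdown. A possible shape, with the lowercase toString() convention the breakdown keys rely on (constant list beyond CREATE_WEIGHT and REWRITE is an assumption):

    import java.util.Locale;

    // Assumed enum, not shown in this diff: QueryTimingType's constants plus REWRITE.
    public enum ConcurrentQueryTimingType {
        CREATE_WEIGHT,
        REWRITE,
        BUILD_SCORER,
        NEXT_DOC,
        ADVANCE,
        MATCH,
        SCORE;
        // ... any remaining QueryTimingType constants would be mirrored here

        @Override
        public String toString() {
            return name().toLowerCase(Locale.ROOT); // breakdown map keys use the lowercase name
        }
    }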
@@ -248,7 +255,7 @@ public Map<String, Long> buildQueryBreakdownMap(
    ) {
        final Map<String, Long> queryBreakdownMap = new HashMap<>();
        long queryEndTime = Long.MIN_VALUE;
-        for (QueryTimingType queryTimingType : QueryTimingType.values()) {
+        for (ConcurrentQueryTimingType queryTimingType : ConcurrentQueryTimingType.values()) {
            final String timingTypeKey = queryTimingType.toString();
            final String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX;
            final String sliceEndTimeForTimingType = timingTypeKey + SLICE_END_TIME_SUFFIX;
@@ -266,12 +273,76 @@ public Map<String, Long> buildQueryBreakdownMap(
            long queryTimingTypeCount = 0L;

            // the create weight time is computed at the query level and is called only once per query
-            if (queryTimingType == QueryTimingType.CREATE_WEIGHT) {
+            if (queryTimingType == ConcurrentQueryTimingType.CREATE_WEIGHT) {
                queryBreakdownMap.put(timingTypeCountKey, 1L);
                queryBreakdownMap.put(timingTypeKey, createWeightTime);
                continue;
            }

+            if (queryTimingType == ConcurrentQueryTimingType.REWRITE) {
+                if (threadToRewriteTimer.isEmpty()) {
+                    // add time related stats
+                    queryBreakdownMap.put(timingTypeKey, 0L);
+                    queryBreakdownMap.put(maxBreakdownTypeTime, 0L);
+                    queryBreakdownMap.put(minBreakdownTypeTime, 0L);
+                    queryBreakdownMap.put(avgBreakdownTypeTime, 0L);
+                    // add count related stats
+                    queryBreakdownMap.put(timingTypeCountKey, 0L);
+                    queryBreakdownMap.put(maxBreakdownTypeCount, 0L);
+                    queryBreakdownMap.put(minBreakdownTypeCount, 0L);
+                    queryBreakdownMap.put(avgBreakdownTypeCount, 0L);
+                    continue;
+                }
+                for (Map.Entry<Long, Timer> rewrite : threadToRewriteTimer.entrySet()) {
+                    long sliceRewriteTime = rewrite.getValue().getApproximateTiming();
+                    long sliceRewriteCount = rewrite.getValue().getCount();
+                    long sliceRewriteStartTime = rewrite.getValue().getEarliestTimerStartTime();
+                    // compute max/min/avg rewrite time across slices
+                    queryBreakdownMap.compute(
+                        maxBreakdownTypeTime,
+                        (key, value) -> (value == null) ? sliceRewriteTime : Math.max(sliceRewriteTime, value)
+                    );
+                    queryBreakdownMap.compute(
+                        minBreakdownTypeTime,
+                        (key, value) -> (value == null) ? sliceRewriteTime : Math.min(sliceRewriteTime, value)
+                    );
+                    queryBreakdownMap.compute(
+                        avgBreakdownTypeTime,
+                        (key, value) -> (value == null) ? sliceRewriteTime : sliceRewriteTime + value
+                    );
+
+                    // compute max/min/avg rewrite count across slices
+                    queryBreakdownMap.compute(
+                        maxBreakdownTypeCount,
+                        (key, value) -> (value == null) ? sliceRewriteCount : Math.max(sliceRewriteCount, value)
+                    );
+                    queryBreakdownMap.compute(
+                        minBreakdownTypeCount,
+                        (key, value) -> (value == null) ? sliceRewriteCount : Math.min(sliceRewriteCount, value)
+                    );
+                    queryBreakdownMap.compute(
+                        avgBreakdownTypeCount,
+                        (key, value) -> (value == null) ? sliceRewriteCount : sliceRewriteCount + value
+                    );
+
+                    // query start/end time for rewrite is min/max of start/end time across slices for that TimingType
+                    queryTimingTypeEndTime = Math.max(
+                        queryTimingTypeEndTime,
+                        sliceRewriteStartTime + sliceRewriteTime
+                    );
+                    queryTimingTypeStartTime = Math.min(
+                        queryTimingTypeStartTime,
+                        sliceRewriteStartTime
+                    );
+                    queryTimingTypeCount += sliceRewriteCount;
+                }
+                queryBreakdownMap.put(timingTypeKey, queryTimingTypeEndTime - queryTimingTypeStartTime);
+                queryBreakdownMap.put(timingTypeCountKey, queryTimingTypeCount);
+                queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0L : value / threadToRewriteTimer.size());
+                queryBreakdownMap.compute(avgBreakdownTypeCount, (key, value) -> (value == null) ? 0L : value / threadToRewriteTimer.size());
+                continue;
+            }
+
            // for all other timing types, we will compute min/max/avg/total across slices
            for (Map.Entry<Collector, Map<String, Long>> sliceBreakdown : sliceLevelBreakdowns.entrySet()) {
                long sliceBreakdownTypeTime = sliceBreakdown.getValue().getOrDefault(timingTypeKey, 0L);
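
To make the REWRITE aggregation concrete: the reported "rewrite" time is the wall-clock span across threads (latest end minus earliest start), not the sum, while max/min/avg and counts are taken per thread. A standalone sketch, not part of this change, that applies the same arithmetic to two made-up rewrite timers (thread A: 30ns over 2 calls starting at t=100; thread B: 50ns over 1 call starting at t=120):

    // Standalone illustration of the REWRITE aggregation above, using made-up numbers.
    public class RewriteBreakdownExample {
        public static void main(String[] args) {
            long[][] slices = { { 30L, 2L, 100L }, { 50L, 1L, 120L } }; // {time, count, startTime} per thread

            long maxTime = Long.MIN_VALUE, minTime = Long.MAX_VALUE, sumTime = 0L;
            long maxCount = Long.MIN_VALUE, minCount = Long.MAX_VALUE, sumCount = 0L;
            long start = Long.MAX_VALUE, end = Long.MIN_VALUE;
            for (long[] slice : slices) {
                maxTime = Math.max(maxTime, slice[0]);
                minTime = Math.min(minTime, slice[0]);
                sumTime += slice[0];
                maxCount = Math.max(maxCount, slice[1]);
                minCount = Math.min(minCount, slice[1]);
                sumCount += slice[1];
                end = Math.max(end, slice[2] + slice[0]);   // latest rewrite end across threads
                start = Math.min(start, slice[2]);          // earliest rewrite start across threads
            }
            // prints: rewrite=70 max=50 min=30 avg=40 count=3 avg_count=1 (avg uses integer division)
            System.out.printf("rewrite=%d max=%d min=%d avg=%d count=%d avg_count=%d%n",
                end - start, maxTime, minTime, sumTime / slices.length, sumCount, sumCount / slices.length);
        }
    }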