@@ -795,8 +795,7 @@ void G1CollectedHeap::prepare_for_mutator_after_full_collection(size_t allocatio
   assert(num_free_regions() == 0, "we should not have added any free regions");
   rebuild_region_sets(false /* free_list_only */);
   abort_refinement();
-  resize_heap_if_necessary(allocation_word_size);
-  uncommit_regions_if_necessary();
+  resize_heap_after_full_collection(allocation_word_size);
 
   // Rebuild the code root lists for each region
   rebuild_code_roots();
@@ -879,21 +878,41 @@ void G1CollectedHeap::upgrade_to_full_collection() {
                      size_t(0) /* allocation_word_size */);
 }
 
-void G1CollectedHeap::resize_heap_if_necessary(size_t allocation_word_size) {
+
+void G1CollectedHeap::resize_heap(size_t resize_bytes, bool should_expand) {
+  if (should_expand) {
+    expand(resize_bytes, _workers);
+  } else {
+    shrink(resize_bytes);
+    uncommit_regions_if_necessary();
+  }
+}
+
+void G1CollectedHeap::resize_heap_after_full_collection(size_t allocation_word_size) {
   assert_at_safepoint_on_vm_thread();
 
   bool should_expand;
-  size_t resize_amount = _heap_sizing_policy->full_collection_resize_amount(should_expand, allocation_word_size);
+  size_t resize_bytes = _heap_sizing_policy->full_collection_resize_amount(should_expand, allocation_word_size);
 
-  if (resize_amount == 0) {
-    return;
-  } else if (should_expand) {
-    expand(resize_amount, _workers);
-  } else {
-    shrink(resize_amount);
+  if (resize_bytes != 0) {
+    resize_heap(resize_bytes, should_expand);
   }
 }
 
+void G1CollectedHeap::resize_heap_after_young_collection(size_t allocation_word_size) {
+  Ticks start = Ticks::now();
+
+  bool should_expand;
+
+  size_t resize_bytes = _heap_sizing_policy->young_collection_resize_amount(should_expand, allocation_word_size);
+
+  if (resize_bytes != 0) {
+    resize_heap(resize_bytes, should_expand);
+  }
+
+  phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0);
+}
+
 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
                                                             bool do_gc,
                                                             bool maximal_compaction,
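The refactor above centralizes a contract that is easy to state: the sizing policy returns a byte count and reports the direction through a `bool&` out parameter, zero meaning "leave the heap alone", and `resize_heap()` is the single dispatch point to `expand()` or `shrink()` (with uncommit tied to the shrink path). A minimal standalone sketch of that control flow, using an invented `MockSizingPolicy` with made-up thresholds (the real decision lives in `G1HeapSizingPolicy`):

```cpp
#include <cstdio>
#include <cstddef>

// Invented stand-in for G1HeapSizingPolicy::full_collection_resize_amount():
// returns the resize amount in bytes and sets should_expand; 0 means no resize.
struct MockSizingPolicy {
  size_t resize_amount(bool& should_expand, size_t used, size_t capacity) const {
    should_expand = false;
    double occupancy = (double)used / (double)capacity;
    if (occupancy > 0.70) { should_expand = true; return capacity / 2; }
    if (occupancy < 0.30) { return capacity / 4; }
    return 0; // within the target band: leave the heap alone
  }
};

// Mirrors the shape of G1CollectedHeap::resize_heap(): one dispatch point,
// with uncommit happening only on the shrink path.
static void resize_heap(size_t resize_bytes, bool should_expand) {
  if (should_expand) {
    std::printf("expand by %zu bytes\n", resize_bytes);
  } else {
    std::printf("shrink by %zu bytes, then uncommit inactive regions\n", resize_bytes);
  }
}

int main() {
  MockSizingPolicy policy;
  bool should_expand;
  size_t resize_bytes = policy.resize_amount(should_expand, 900, 1000);
  if (resize_bytes != 0) {
    resize_heap(resize_bytes, should_expand);
  }
  return 0;
}
```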
@@ -1002,22 +1021,22 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
 }
 
 bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_workers) {
+  assert(expand_bytes > 0, "precondition");
+
   size_t aligned_expand_bytes = os::align_up_vm_page_size(expand_bytes);
   aligned_expand_bytes = align_up(aligned_expand_bytes, G1HeapRegion::GrainBytes);
 
-  log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: %zuB expansion amount: %zuB",
-                            expand_bytes, aligned_expand_bytes);
+  uint num_regions_to_expand = (uint)(aligned_expand_bytes / G1HeapRegion::GrainBytes);
+
+  log_debug(gc, ergo, heap)("Heap resize. Requested expansion amount: %zuB aligned expansion amount: %zuB (%u regions)",
+                            expand_bytes, aligned_expand_bytes, num_regions_to_expand);
 
   if (num_inactive_regions() == 0) {
-    log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
+    log_debug(gc, ergo, heap)("Heap resize. Did not expand the heap (heap already fully expanded)");
     return false;
   }
 
-  uint regions_to_expand = (uint)(aligned_expand_bytes / G1HeapRegion::GrainBytes);
-  assert(regions_to_expand > 0, "Must expand by at least one region");
-
-  uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
-  assert(expanded_by > 0, "must have failed during commit.");
+  uint expanded_by = _hrm.expand_by(num_regions_to_expand, pretouch_workers);
 
   size_t actual_expand_bytes = expanded_by * G1HeapRegion::GrainBytes;
   assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
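The `(%u regions)` figure in the new log line falls out of a two-step rounding: the request is aligned up to the OS page size first, then to the region size, and only then converted to a region count. A standalone sketch with assumed sizes (4 KiB pages, 4 MiB regions; the real values come from `os::align_up_vm_page_size()` and `G1HeapRegion::GrainBytes`):

```cpp
#include <cstdio>
#include <cstddef>

// Round value up to a power-of-two alignment (same effect as HotSpot's
// align_up for power-of-two alignments).
static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t page_size   = 4 * 1024;         // assumed 4 KiB pages
  const size_t grain_bytes = 4 * 1024 * 1024;  // assumed 4 MiB regions

  size_t expand_bytes = 5 * 1024 * 1024 + 123; // odd-sized request, ~5 MiB
  size_t aligned = align_up(expand_bytes, page_size);  // up to the next page
  aligned = align_up(aligned, grain_bytes);            // rounds up to 8 MiB
  unsigned num_regions_to_expand = (unsigned)(aligned / grain_bytes); // 2

  std::printf("requested %zuB -> aligned %zuB (%u regions)\n",
              expand_bytes, aligned, num_regions_to_expand);
  return 0;
}
```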
@@ -1040,24 +1059,45 @@ bool G1CollectedHeap::expand_single_region(uint node_index) {
 }
 
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
-  size_t aligned_shrink_bytes = os::align_down_vm_page_size(shrink_bytes);
-  aligned_shrink_bytes = align_down(aligned_shrink_bytes, G1HeapRegion::GrainBytes);
+  assert(shrink_bytes > 0, "must be");
+  assert(is_aligned(shrink_bytes, G1HeapRegion::GrainBytes),
+         "Shrink request for %zuB not aligned to heap region size %zuB",
+         shrink_bytes, G1HeapRegion::GrainBytes);
+
   uint num_regions_to_remove = (uint)(shrink_bytes / G1HeapRegion::GrainBytes);
 
   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
   size_t shrunk_bytes = num_regions_removed * G1HeapRegion::GrainBytes;
 
-  log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: %zuB aligned shrinking amount: %zuB actual amount shrunk: %zuB",
-                            shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
+  log_debug(gc, ergo, heap)("Heap resize. Requested shrinking amount: %zuB actual shrinking amount: %zuB (%u regions)",
+                            shrink_bytes, shrunk_bytes, num_regions_removed);
   if (num_regions_removed > 0) {
-    log_debug(gc, heap)("Uncommittable regions after shrink: %u", num_regions_removed);
     policy()->record_new_heap_size(num_committed_regions());
   } else {
-    log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
+    log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (heap shrinking operation failed)");
   }
 }
 
 void G1CollectedHeap::shrink(size_t shrink_bytes) {
+  if (capacity() == min_capacity()) {
+    log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (heap already at minimum)");
+    return;
+  }
+
+  size_t aligned_shrink_bytes = os::align_down_vm_page_size(shrink_bytes);
+  aligned_shrink_bytes = align_down(aligned_shrink_bytes, G1HeapRegion::GrainBytes);
+
+  aligned_shrink_bytes = capacity() - MAX2(capacity() - aligned_shrink_bytes, min_capacity());
+  assert(is_aligned(aligned_shrink_bytes, G1HeapRegion::GrainBytes), "Bytes to shrink %zuB not aligned", aligned_shrink_bytes);
+
+  log_debug(gc, ergo, heap)("Heap resize. Requested shrink amount: %zuB aligned shrink amount: %zuB",
+                            shrink_bytes, aligned_shrink_bytes);
+
+  if (aligned_shrink_bytes == 0) {
+    log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (shrink request too small)");
+    return;
+  }
+
   _verifier->verify_region_sets_optional();
 
   // We should only reach here at the end of a Full GC or during Remark which
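The clamp added to `shrink()` is worth unpacking: `capacity() - MAX2(capacity() - aligned_shrink_bytes, min_capacity())` is the requested shrink capped so the committed size never drops below `min_capacity()`, collapsing to zero when the cap leaves nothing to shrink, which the "shrink request too small" branch then reports. A standalone numeric check under assumed sizes (128 MiB floor, 512 MiB committed):

```cpp
#include <cstdio>
#include <cstddef>
#include <algorithm>

// Emulates: capacity - MAX2(capacity - request, min_capacity).
// Equivalent to min(request, capacity - min_capacity): never shrink below
// the floor.
static size_t clamp_shrink(size_t request, size_t capacity, size_t min_capacity) {
  size_t target = request <= capacity ? capacity - request : 0;
  return capacity - std::max(target, min_capacity);
}

int main() {
  const size_t MiB = 1024 * 1024;
  const size_t min_capacity = 128 * MiB; // assumed floor (MinHeapSize)
  const size_t capacity     = 512 * MiB; // assumed committed size

  // 64 MiB fits above the floor: shrink the full amount.
  std::printf("%zu MiB\n", clamp_shrink( 64 * MiB, capacity, min_capacity) / MiB); // 64
  // 400 MiB would undershoot the floor: clamped to 384 MiB.
  std::printf("%zu MiB\n", clamp_shrink(400 * MiB, capacity, min_capacity) / MiB); // 384
  // Asking for everything is clamped the same way.
  std::printf("%zu MiB\n", clamp_shrink(512 * MiB, capacity, min_capacity) / MiB); // 384
  return 0;
}
```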
@@ -1069,7 +1109,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
   // could instead use the remove_all_pending() method on free_list to
   // remove only the ones that we need to remove.
   _hrm.remove_all_free_regions();
-  shrink_helper(shrink_bytes);
+  shrink_helper(aligned_shrink_bytes);
   rebuild_region_sets(true /* free_list_only */);
 
   _hrm.verify_optional();
@@ -1338,7 +1378,7 @@ jint G1CollectedHeap::initialize() {
   }
 
   os::trace_page_sizes("Heap",
-                       MinHeapSize,
+                       min_capacity(),
                        reserved_byte_size,
                        heap_rs.base(),
                        heap_rs.size(),
@@ -2024,7 +2064,7 @@ bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
 }
 
 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
-  return (_policy->young_list_target_length() - _survivor.length()) * G1HeapRegion::GrainBytes;
+  return eden_target_length() * G1HeapRegion::GrainBytes;
 }
 
 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
@@ -2045,6 +2085,10 @@ size_t G1CollectedHeap::max_capacity() const {
   return max_num_regions() * G1HeapRegion::GrainBytes;
 }
 
+size_t G1CollectedHeap::min_capacity() const {
+  return MinHeapSize;
+}
+
 void G1CollectedHeap::prepare_for_verify() {
   _verifier->prepare_for_verify();
 }
@@ -2390,24 +2434,11 @@ void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType
   phase_times()->record_verify_after_time_ms((Ticks::now() - start).seconds() * MILLIUNITS);
 }
 
-void G1CollectedHeap::expand_heap_after_young_collection() {
-  size_t expand_bytes = _heap_sizing_policy->young_collection_expansion_amount();
-  if (expand_bytes > 0) {
-    // No need for an ergo logging here,
-    // expansion_amount() does this when it returns a value > 0.
-    Ticks expand_start = Ticks::now();
-    if (expand(expand_bytes, _workers)) {
-      double expand_ms = (Ticks::now() - expand_start).seconds() * MILLIUNITS;
-      phase_times()->record_expand_heap_time(expand_ms);
-    }
-  }
-}
-
-void G1CollectedHeap::do_collection_pause_at_safepoint() {
+void G1CollectedHeap::do_collection_pause_at_safepoint(size_t allocation_word_size) {
   assert_at_safepoint_on_vm_thread();
   guarantee(!is_stw_gc_active(), "collection is not reentrant");
 
-  do_collection_pause_at_safepoint_helper();
+  do_collection_pause_at_safepoint_helper(allocation_word_size);
 }
 
 G1HeapPrinterMark::G1HeapPrinterMark(G1CollectedHeap* g1h) : _g1h(g1h), _heap_transition(g1h) {
@@ -2471,7 +2502,7 @@ void G1CollectedHeap::flush_region_pin_cache() {
   }
 }
 
-void G1CollectedHeap::do_collection_pause_at_safepoint_helper() {
+void G1CollectedHeap::do_collection_pause_at_safepoint_helper(size_t allocation_word_size) {
   ResourceMark rm;
 
   IsSTWGCActiveMark active_gc_mark;
@@ -2489,7 +2520,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper() {
   bool should_start_concurrent_mark_operation = collector_state()->in_concurrent_start_gc();
 
   // Perform the collection.
-  G1YoungCollector collector(gc_cause());
+  G1YoungCollector collector(gc_cause(), allocation_word_size);
   collector.collect();
 
   // It should now be safe to tell the concurrent mark thread to start
@@ -2611,6 +2642,13 @@ void G1CollectedHeap::set_young_gen_card_set_stats(const G1MonotonicArenaMemoryS
 
 void G1CollectedHeap::record_obj_copy_mem_stats() {
   size_t total_old_allocated = _old_evac_stats.allocated() + _old_evac_stats.direct_allocated();
+  uint total_allocated = _survivor_evac_stats.regions_filled() + _old_evac_stats.regions_filled();
+
+  log_debug(gc)("Allocated %u survivor %u old percent total %1.2f%% (%u%%)",
+                _survivor_evac_stats.regions_filled(), _old_evac_stats.regions_filled(),
+                percent_of(total_allocated, num_committed_regions() - total_allocated),
+                G1ReservePercent);
+
   policy()->old_gen_alloc_tracker()->
     add_allocated_bytes_since_last_gc(total_old_allocated * HeapWordSize);
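The new debug line reports how many regions evacuation filled (survivor plus old) and expresses that as a percentage of the committed regions that were not filled, printed next to `G1ReservePercent` for comparison. A standalone rendering of the same arithmetic, with made-up counts and a local stand-in for HotSpot's `percent_of`:

```cpp
#include <cstdio>

// Local stand-in for HotSpot's percent_of(numerator, denominator).
static double percent_of(unsigned part, unsigned total) {
  return total == 0 ? 0.0 : 100.0 * part / total;
}

int main() {
  unsigned survivor_filled = 6;   // assumed regions filled by survivor copies
  unsigned old_filled      = 10;  // assumed regions filled by old-gen copies
  unsigned committed       = 256; // assumed committed region count
  unsigned reserve_percent = 10;  // G1ReservePercent default

  unsigned total_allocated = survivor_filled + old_filled;
  std::printf("Allocated %u survivor %u old percent total %1.2f%% (%u%%)\n",
              survivor_filled, old_filled,
              percent_of(total_allocated, committed - total_allocated),
              reserve_percent);
  return 0;
}
```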