// NOTE(review): fragment — the enclosing function begins before this excerpt
// (each line is prefixed with its original file line number). Based on the
// calls to record_full_collection_end() and increment_old_marking_cycles_completed(),
// this is presumably the tail of G1's full-collection routine — confirm against
// the full file.
1494 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1495
// Sampling info is stale after the collection; reset before the empty check below.
1496 _young_list->reset_sampled_info();
1497 // At this point there should be no regions in the
1498 // entire heap tagged as young.
1499 assert(check_young_list_empty(true /* check_heap */),
1500 "young list should be empty at this point");
1501
1502 // Update the number of full collections that have been completed.
1503 increment_old_marking_cycles_completed(false /* concurrent */);
1504
// Optional (debug-only, presumably) consistency checks of the region sets.
1505 _hrs.verify_optional();
1506 verify_region_sets_optional();
1507
1508 verify_after_gc();
1509
1510 // Start a new incremental collection set for the next pause
// The collection set must have been freed/cleared before rebuilding starts.
1511 assert(g1_policy()->collection_set() == NULL, "must be");
1512 g1_policy()->start_incremental_cset_building();
1513
1514 // Clear the _cset_fast_test bitmap in anticipation of adding
1515 // regions to the incremental collection set for the next
1516 // evacuation pause.
1517 clear_cset_fast_test();
1518
1519 init_mutator_alloc_region();
1520
// NOTE(review): `end` is not used in this excerpt — presumably consumed by
// code outside the visible lines, or dead; confirm in the full file.
1521 double end = os::elapsedTime();
1522 g1_policy()->record_full_collection_end();
1523
1524 if (G1Log::fine()) {
1525 g1_policy()->print_heap_transition();
1526 }
1527
1528 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1529 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1530 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1531 // before any GC notifications are raised.
1532 g1mm()->update_sizes();
1533
1534 gc_epilogue(true);
1535 }
1536
// NOTE(review): fragment — this excerpt starts in the middle of a constructor
// initializer list (the signature and earlier initializers are outside the
// visible lines); the body opens at the `{` below. Field names (_free_list,
// _young_list, ...) suggest this is the G1CollectedHeap constructor — confirm.
1916 _mark_in_progress(false),
1917 _cg1r(NULL), _summary_bytes_used(0),
1918 _g1mm(NULL),
1919 _refine_cte_cl(NULL),
1920 _full_collection(false),
// Region-set lists each get an MT-safety checker appropriate to their lock discipline.
1921 _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
1922 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1923 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1924 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1925 _free_regions_coming(false),
1926 _young_list(new YoungList(this)),
1927 _gc_time_stamp(0),
1928 _retained_old_gc_alloc_region(NULL),
1929 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1930 _old_plab_stats(OldPLABSize, PLABWeight),
1931 _expand_heap_after_alloc_failure(true),
1932 _surviving_young_words(NULL),
1933 _old_marking_cycles_started(0),
1934 _old_marking_cycles_completed(0),
1935 _concurrent_cycle_started(false),
// The in-cset fast-test array is only NULL-ed here; the real (biased) array is
// allocated later during heap initialization (see lines 2079-2087 in this file).
1936 _in_cset_fast_test(NULL),
1937 _in_cset_fast_test_base(NULL),
1938 _dirty_cards_region_list(NULL),
1939 _worker_cset_start_region(NULL),
1940 _worker_cset_start_region_time_stamp(NULL),
1941 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1942 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1943 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1944 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1945
1946 _g1h = this;
1947 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1948 vm_exit_during_initialization("Failed necessary allocation.");
1949 }
1950
// Objects of at least half a region are treated as humongous.
1951 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1952
// Clamp to at least one queue so the single-threaded case still works.
1953 int n_queues = MAX2((int)ParallelGCThreads, 1);
1954 _task_queues = new RefToScanQueueSet(n_queues);
1955
1956 uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1957 assert(n_rem_sets > 0, "Invariant.");
// NOTE(review): fragment — mid-body excerpt of a heap-initialization routine.
// The `return JNI_ENOMEM` statements suggest this is G1CollectedHeap::initialize()
// (returning a jint status) — confirm against the full file.
2059 // Do later initialization work for concurrent refinement.
2060 _cg1r->init();
2061
2062 // 6843694 - ensure that the maximum region index can fit
2063 // in the remembered set structures.
// Max value representable in a signed RegionIdx_t (top bit reserved for sign).
2064 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2065 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2066
// Same fit check for card indices within a region (CardIdx_t).
2067 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2068 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2069 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2070 "too many cards per region");
2071
2072 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
2073
2074 _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2075 heap_word_size(init_byte_size));
2076
// NOTE(review): _g1h is also assigned in the constructor (line 1946) — this
// repeat assignment looks redundant; confirm before removing.
2077 _g1h = this;
2078
// One fast-test slot per region; C-heap allocated, freed elsewhere (not visible here).
2079 _in_cset_fast_test_length = max_regions();
2080 _in_cset_fast_test_base =
2081 NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
2082
2083 // We're biasing _in_cset_fast_test to avoid subtracting the
2084 // beginning of the heap every time we want to index; basically
2085 // it's the same with what we do with the card table.
2086 _in_cset_fast_test = _in_cset_fast_test_base -
2087 ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
2088
2089 // Clear the _cset_fast_test bitmap in anticipation of adding
2090 // regions to the incremental collection set for the first
2091 // evacuation pause.
2092 clear_cset_fast_test();
2093
2094 // Create the ConcurrentMark data structure and thread.
2095 // (Must do this late, so that "max_regions" is defined.)
2096 _cm = new ConcurrentMark(this, heap_rs);
2097 if (_cm == NULL || !_cm->completed_initialization()) {
2098 vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2099 return JNI_ENOMEM;
2100 }
2101 _cmThread = _cm->cmThread();
2102
2103 // Initialize the from_card cache structure of HeapRegionRemSet.
2104 HeapRegionRemSet::init_heap(max_regions());
2105
2106 // Now expand into the initial heap size.
2107 if (!expand(init_byte_size)) {
2108 vm_shutdown_during_initialization("Failed to allocate initial heap.");
2109 return JNI_ENOMEM;
2110 }
2111
2112 // Perform any initialization actions delegated to the policy.
// NOTE(review): fragment — mid-body excerpt of an evacuation-pause routine
// (evacuate, free the collection set, then set up the next incremental cset).
4128 evacuate_collection_set(evacuation_info);
4129
4130 // We do this to mainly verify the per-thread SATB buffers
4131 // (which have been filtered by now) since we didn't verify
4132 // them earlier. No point in re-checking the stacks / enqueued
4133 // buffers given that the CSet has not changed since last time
4134 // we checked.
4135 _cm->verify_no_cset_oops(false /* verify_stacks */,
4136 false /* verify_enqueued_buffers */,
4137 true /* verify_thread_buffers */,
4138 true /* verify_fingers */);
4139
// Reclaim the just-evacuated regions, then drop the policy's cset reference.
4140 free_collection_set(g1_policy()->collection_set(), evacuation_info);
4141 g1_policy()->clear_collection_set();
4142
4143 cleanup_surviving_young_words();
4144
4145 // Start a new incremental collection set for the next pause.
4146 g1_policy()->start_incremental_cset_building();
4147
4148 // Clear the _cset_fast_test bitmap in anticipation of adding
4149 // regions to the incremental collection set for the next
4150 // evacuation pause.
4151 clear_cset_fast_test();
4152
4153 _young_list->reset_sampled_info();
4154
4155 // Don't check the whole heap at this point as the
4156 // GC alloc regions from this pause have been tagged
4157 // as survivors and moved on to the survivor list.
4158 // Survivor regions will fail the !is_young() check.
// Note: check_heap is false here, unlike the full-GC path which passes true.
4159 assert(check_young_list_empty(false /* check_heap */),
4160 "young list should be empty");
4161
4162 #if YOUNG_LIST_VERBOSE
4163 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
4164 _young_list->print();
4165 #endif // YOUNG_LIST_VERBOSE
4166
4167 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
4168 _young_list->first_survivor_region(),
4169 _young_list->last_survivor_region());
4170
|
// NOTE(review): fragment — second revision of the full-collection tail that
// also appears earlier in this dump. Functionally the same sequence; the
// explanatory comment above clear_cset_fast_test() was dropped in this revision.
1494 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1495
1496 _young_list->reset_sampled_info();
1497 // At this point there should be no regions in the
1498 // entire heap tagged as young.
1499 assert(check_young_list_empty(true /* check_heap */),
1500 "young list should be empty at this point");
1501
1502 // Update the number of full collections that have been completed.
1503 increment_old_marking_cycles_completed(false /* concurrent */);
1504
1505 _hrs.verify_optional();
1506 verify_region_sets_optional();
1507
1508 verify_after_gc();
1509
1510 // Start a new incremental collection set for the next pause
1511 assert(g1_policy()->collection_set() == NULL, "must be");
1512 g1_policy()->start_incremental_cset_building();
1513
// Reset the in-cset fast-test structure before regions are added to the
// incremental collection set for the next evacuation pause.
1514 clear_cset_fast_test();
1515
1516 init_mutator_alloc_region();
1517
// NOTE(review): `end` is not used in this excerpt — confirm in the full file.
1518 double end = os::elapsedTime();
1519 g1_policy()->record_full_collection_end();
1520
1521 if (G1Log::fine()) {
1522 g1_policy()->print_heap_transition();
1523 }
1524
1525 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1526 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1527 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1528 // before any GC notifications are raised.
1529 g1mm()->update_sizes();
1530
1531 gc_epilogue(true);
1532 }
1533
// NOTE(review): fragment — second revision of the constructor initializer-list
// excerpt. Difference from the earlier revision in this dump: _in_cset_fast_test
// is now a default-constructed object (no raw pointer), and the separate
// _in_cset_fast_test_base field is gone; its storage is set up later via
// _in_cset_fast_test.initialize(...) (see line 2075 in this revision).
1913 _mark_in_progress(false),
1914 _cg1r(NULL), _summary_bytes_used(0),
1915 _g1mm(NULL),
1916 _refine_cte_cl(NULL),
1917 _full_collection(false),
1918 _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
1919 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1920 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1921 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1922 _free_regions_coming(false),
1923 _young_list(new YoungList(this)),
1924 _gc_time_stamp(0),
1925 _retained_old_gc_alloc_region(NULL),
1926 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1927 _old_plab_stats(OldPLABSize, PLABWeight),
1928 _expand_heap_after_alloc_failure(true),
1929 _surviving_young_words(NULL),
1930 _old_marking_cycles_started(0),
1931 _old_marking_cycles_completed(0),
1932 _concurrent_cycle_started(false),
1933 _in_cset_fast_test(),
1934 _dirty_cards_region_list(NULL),
1935 _worker_cset_start_region(NULL),
1936 _worker_cset_start_region_time_stamp(NULL),
1937 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1938 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1939 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1940 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1941
1942 _g1h = this;
1943 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1944 vm_exit_during_initialization("Failed necessary allocation.");
1945 }
1946
// Objects of at least half a region are treated as humongous.
1947 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1948
// Clamp to at least one queue so the single-threaded case still works.
1949 int n_queues = MAX2((int)ParallelGCThreads, 1);
1950 _task_queues = new RefToScanQueueSet(n_queues);
1951
1952 uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1953 assert(n_rem_sets > 0, "Invariant.");
// NOTE(review): fragment — second revision of the heap-initialization excerpt.
// The manual NEW_C_HEAP_ARRAY allocation, pointer biasing, and explicit
// clear_cset_fast_test() of the earlier revision are replaced by a single
// _in_cset_fast_test.initialize(...) call covering the reserved heap range.
2055 // Do later initialization work for concurrent refinement.
2056 _cg1r->init();
2057
2058 // 6843694 - ensure that the maximum region index can fit
2059 // in the remembered set structures.
// Max value representable in a signed RegionIdx_t (top bit reserved for sign).
2060 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2061 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2062
// Same fit check for card indices within a region (CardIdx_t).
2063 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2064 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2065 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2066 "too many cards per region");
2067
2068 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
2069
2070 _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2071 heap_word_size(init_byte_size));
2072
// NOTE(review): _g1h is also assigned in the constructor (line 1942) — this
// repeat assignment looks redundant; confirm before removing.
2073 _g1h = this;
2074
// Sets up one fast-test entry per region (HeapRegion::GrainBytes granularity)
// over the reserved range; presumably also clears the entries — confirm in
// the initialize() implementation.
2075 _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
2076
2077 // Create the ConcurrentMark data structure and thread.
2078 // (Must do this late, so that "max_regions" is defined.)
2079 _cm = new ConcurrentMark(this, heap_rs);
2080 if (_cm == NULL || !_cm->completed_initialization()) {
2081 vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2082 return JNI_ENOMEM;
2083 }
2084 _cmThread = _cm->cmThread();
2085
2086 // Initialize the from_card cache structure of HeapRegionRemSet.
2087 HeapRegionRemSet::init_heap(max_regions());
2088
2089 // Now expand into the initial heap size.
2090 if (!expand(init_byte_size)) {
2091 vm_shutdown_during_initialization("Failed to allocate initial heap.");
2092 return JNI_ENOMEM;
2093 }
2094
2095 // Perform any initialization actions delegated to the policy.
// NOTE(review): fragment — second revision of the evacuation-pause excerpt.
// Same sequence as the earlier revision; the explanatory comment above
// clear_cset_fast_test() was dropped here.
4111 evacuate_collection_set(evacuation_info);
4112
4113 // We do this to mainly verify the per-thread SATB buffers
4114 // (which have been filtered by now) since we didn't verify
4115 // them earlier. No point in re-checking the stacks / enqueued
4116 // buffers given that the CSet has not changed since last time
4117 // we checked.
4118 _cm->verify_no_cset_oops(false /* verify_stacks */,
4119 false /* verify_enqueued_buffers */,
4120 true /* verify_thread_buffers */,
4121 true /* verify_fingers */);
4122
// Reclaim the just-evacuated regions, then drop the policy's cset reference.
4123 free_collection_set(g1_policy()->collection_set(), evacuation_info);
4124 g1_policy()->clear_collection_set();
4125
4126 cleanup_surviving_young_words();
4127
4128 // Start a new incremental collection set for the next pause.
4129 g1_policy()->start_incremental_cset_building();
4130
// Reset the in-cset fast-test structure before regions are added to the
// incremental collection set for the next evacuation pause.
4131 clear_cset_fast_test();
4132
4133 _young_list->reset_sampled_info();
4134
4135 // Don't check the whole heap at this point as the
4136 // GC alloc regions from this pause have been tagged
4137 // as survivors and moved on to the survivor list.
4138 // Survivor regions will fail the !is_young() check.
// Note: check_heap is false here, unlike the full-GC path which passes true.
4139 assert(check_young_list_empty(false /* check_heap */),
4140 "young list should be empty");
4141
4142 #if YOUNG_LIST_VERBOSE
4143 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
4144 _young_list->print();
4145 #endif // YOUNG_LIST_VERBOSE
4146
4147 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
4148 _young_list->first_survivor_region(),
4149 _young_list->last_survivor_region());
4150
|