1607 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1608
1609 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1610
1611 {
1612 HeapWord* start = _hrm.reserved().start();
1613 HeapWord* end = _hrm.reserved().end();
1614 size_t granularity = HeapRegion::GrainBytes;
1615
1616 _in_cset_fast_test.initialize(start, end, granularity);
1617 _humongous_reclaim_candidates.initialize(start, end, granularity);
1618 }
1619
1620 // Create the G1ConcurrentMark data structure and thread.
1621 // (Must do this late, so that "max_regions" is defined.)
1622 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1623 if (_cm == NULL || !_cm->completed_initialization()) {
1624 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1625 return JNI_ENOMEM;
1626 }
1627 _cmThread = _cm->cm_thread();
1628
1629 // Now expand into the initial heap size.
1630 if (!expand(init_byte_size, _workers)) {
1631 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1632 return JNI_ENOMEM;
1633 }
1634
1635 // Perform any initialization actions delegated to the policy.
1636 g1_policy()->init(this, &_collection_set);
1637
1638 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1639 SATB_Q_FL_lock,
1640 G1SATBProcessCompletedThreshold,
1641 Shared_SATB_Q_lock);
1642
1643 jint ecode = initialize_concurrent_refinement();
1644 if (ecode != JNI_OK) {
1645 return ecode;
1646 }
|
1607 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1608
1609 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1610
1611 {
1612 HeapWord* start = _hrm.reserved().start();
1613 HeapWord* end = _hrm.reserved().end();
1614 size_t granularity = HeapRegion::GrainBytes;
1615
1616 _in_cset_fast_test.initialize(start, end, granularity);
1617 _humongous_reclaim_candidates.initialize(start, end, granularity);
1618 }
1619
1620 // Create the G1ConcurrentMark data structure and thread.
1621 // (Must do this late, so that "max_regions" is defined.)
1622 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1623 if (_cm == NULL || !_cm->completed_initialization()) {
1624 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1625 return JNI_ENOMEM;
1626 }
1627 _cm_thread = _cm->cm_thread();
1628
1629 // Now expand into the initial heap size.
1630 if (!expand(init_byte_size, _workers)) {
1631 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1632 return JNI_ENOMEM;
1633 }
1634
1635 // Perform any initialization actions delegated to the policy.
1636 g1_policy()->init(this, &_collection_set);
1637
1638 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1639 SATB_Q_FL_lock,
1640 G1SATBProcessCompletedThreshold,
1641 Shared_SATB_Q_lock);
1642
1643 jint ecode = initialize_concurrent_refinement();
1644 if (ecode != JNI_OK) {
1645 return ecode;
1646 }
|
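A note on the initialize(start, end, granularity) calls in the hunk above: both _in_cset_fast_test and _humongous_reclaim_candidates are biased per-region tables, so a heap address is mapped to its entry by dividing its offset from the reserved start by HeapRegion::GrainBytes. A minimal standalone sketch of that mapping follows (illustrative types, not HotSpot's G1BiasedMappedArray):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

class RegionFlagTable {
  char*  _base = nullptr;       // start of the covered (reserved) range
  size_t _granularity = 0;      // bytes per region, e.g. HeapRegion::GrainBytes
  std::vector<uint8_t> _flags;  // one entry per region

 public:
  void initialize(char* start, char* end, size_t granularity) {
    assert((end - start) % granularity == 0 && "range must be region-aligned");
    _base = start;
    _granularity = granularity;
    _flags.assign(static_cast<size_t>(end - start) / granularity, 0);
  }

  // Map any address in the covered range to its region's entry.
  uint8_t get_by_address(const void* addr) const {
    return _flags[(static_cast<const char*>(addr) - _base) / _granularity];
  }

  void set_by_address(const void* addr, uint8_t value) {
    _flags[(static_cast<const char*>(addr) - _base) / _granularity] = value;
  }
};

Because GrainBytes is a power of two, the lookup reduces to a subtract, a shift, and an index, which is what makes the "fast test" cheap enough for the collection-set check on the evacuation hot path.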
1696 void G1CollectedHeap::initialize_serviceability() {
1697 _eden_pool = new G1EdenPool(this);
1698 _survivor_pool = new G1SurvivorPool(this);
1699 _old_pool = new G1OldGenPool(this);
1700
1701 _full_gc_memory_manager.add_pool(_eden_pool);
1702 _full_gc_memory_manager.add_pool(_survivor_pool);
1703 _full_gc_memory_manager.add_pool(_old_pool);
1704
1705 _memory_manager.add_pool(_eden_pool);
1706 _memory_manager.add_pool(_survivor_pool);
1707
1708 }
1709
1710 void G1CollectedHeap::stop() {
1711 // Stop all concurrent threads. We do this to make sure these threads
1712 // do not continue to execute and access resources (e.g. logging)
1713 // that are destroyed during shutdown.
1714 _cr->stop();
1715 _young_gen_sampling_thread->stop();
1716 _cmThread->stop();
1717 if (G1StringDedup::is_enabled()) {
1718 G1StringDedup::stop();
1719 }
1720 }
1721
1722 void G1CollectedHeap::safepoint_synchronize_begin() {
1723 SuspendibleThreadSet::synchronize();
1724 }
1725
1726 void G1CollectedHeap::safepoint_synchronize_end() {
1727 SuspendibleThreadSet::desynchronize();
1728 }
1729
1730 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1731 return HeapRegion::max_region_size();
1732 }
1733
1734 void G1CollectedHeap::post_initialize() {
1735 CollectedHeap::post_initialize();
|
1696 void G1CollectedHeap::initialize_serviceability() {
1697 _eden_pool = new G1EdenPool(this);
1698 _survivor_pool = new G1SurvivorPool(this);
1699 _old_pool = new G1OldGenPool(this);
1700
1701 _full_gc_memory_manager.add_pool(_eden_pool);
1702 _full_gc_memory_manager.add_pool(_survivor_pool);
1703 _full_gc_memory_manager.add_pool(_old_pool);
1704
1705 _memory_manager.add_pool(_eden_pool);
1706 _memory_manager.add_pool(_survivor_pool);
1707
1708 }
1709
1710 void G1CollectedHeap::stop() {
1711 // Stop all concurrent threads. We do this to make sure these threads
1712 // do not continue to execute and access resources (e.g. logging)
1713 // that are destroyed during shutdown.
1714 _cr->stop();
1715 _young_gen_sampling_thread->stop();
1716 _cm_thread->stop();
1717 if (G1StringDedup::is_enabled()) {
1718 G1StringDedup::stop();
1719 }
1720 }
1721
1722 void G1CollectedHeap::safepoint_synchronize_begin() {
1723 SuspendibleThreadSet::synchronize();
1724 }
1725
1726 void G1CollectedHeap::safepoint_synchronize_end() {
1727 SuspendibleThreadSet::desynchronize();
1728 }
1729
1730 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1731 return HeapRegion::max_region_size();
1732 }
1733
1734 void G1CollectedHeap::post_initialize() {
1735 CollectedHeap::post_initialize();
|
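On the serviceability wiring above: the full-GC memory manager is registered against all three pools, while the minor (copying) manager is registered against only the young-generation pools, so as wired here only the full-GC manager reports against the old-gen pool. A toy sketch of that registration pattern (illustrative types, not the real GCMemoryManager/MemoryPool classes):

#include <cstdio>
#include <string>
#include <vector>

struct MemoryPool {
  std::string name;
};

class GCMemoryManager {
  std::vector<const MemoryPool*> _pools;
 public:
  void add_pool(const MemoryPool* pool) { _pools.push_back(pool); }
  void print() const {
    for (const MemoryPool* p : _pools) std::printf("  %s\n", p->name.c_str());
  }
};

int main() {
  MemoryPool eden{"G1 Eden Space"}, survivor{"G1 Survivor Space"}, old_gen{"G1 Old Gen"};

  GCMemoryManager full_gc_manager;   // reports on the whole heap
  GCMemoryManager minor_gc_manager;  // reports on the young generation only

  full_gc_manager.add_pool(&eden);
  full_gc_manager.add_pool(&survivor);
  full_gc_manager.add_pool(&old_gen);

  minor_gc_manager.add_pool(&eden);
  minor_gc_manager.add_pool(&survivor);

  std::printf("minor manager pools:\n");
  minor_gc_manager.print();
  return 0;
}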
1949 (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
1950 "for inner caller (Full GC): _old_marking_cycles_started = %u "
1951 "is inconsistent with _old_marking_cycles_completed = %u",
1952 _old_marking_cycles_started, _old_marking_cycles_completed);
1953
1954 // This is the case for the outer caller, i.e. the concurrent cycle.
1955 assert(!concurrent ||
1956 (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
1957 "for outer caller (concurrent cycle): "
1958 "_old_marking_cycles_started = %u "
1959 "is inconsistent with _old_marking_cycles_completed = %u",
1960 _old_marking_cycles_started, _old_marking_cycles_completed);
1961
1962 _old_marking_cycles_completed += 1;
1963
1964 // We need to clear the "in_progress" flag in the CM thread before
1965 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
1966 // is set) so that if a waiter requests another System.gc() it doesn't
1967 // incorrectly see that a marking cycle is still in progress.
1968 if (concurrent) {
1969 _cmThread->set_idle();
1970 }
1971
1972 // This notify_all() will ensure that a thread that called
1973 // System.gc() (with ExplicitGCInvokesConcurrent set or not) and is
1974 // waiting for a full GC to finish will be woken up. It is
1975 // waiting in VM_G1CollectForAllocation::doit_epilogue().
1976 FullGCCount_lock->notify_all();
1977 }
1978
1979 void G1CollectedHeap::collect(GCCause::Cause cause) {
1980 assert_heap_not_locked();
1981
1982 uint gc_count_before;
1983 uint old_marking_count_before;
1984 uint full_gc_count_before;
1985 bool retry_gc;
1986
1987 do {
1988 retry_gc = false;
|
1949 (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
1950 "for inner caller (Full GC): _old_marking_cycles_started = %u "
1951 "is inconsistent with _old_marking_cycles_completed = %u",
1952 _old_marking_cycles_started, _old_marking_cycles_completed);
1953
1954 // This is the case for the outer caller, i.e. the concurrent cycle.
1955 assert(!concurrent ||
1956 (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
1957 "for outer caller (concurrent cycle): "
1958 "_old_marking_cycles_started = %u "
1959 "is inconsistent with _old_marking_cycles_completed = %u",
1960 _old_marking_cycles_started, _old_marking_cycles_completed);
1961
1962 _old_marking_cycles_completed += 1;
1963
1964 // We need to clear the "in_progress" flag in the CM thread before
1965 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
1966 // is set) so that if a waiter requests another System.gc() it doesn't
1967 // incorrectly see that a marking cycle is still in progress.
1968 if (concurrent) {
1969 _cm_thread->set_idle();
1970 }
1971
1972 // This notify_all() will ensure that a thread that called
1973 // System.gc() (with ExplicitGCInvokesConcurrent set or not) and is
1974 // waiting for a full GC to finish will be woken up. It is
1975 // waiting in VM_G1CollectForAllocation::doit_epilogue().
1976 FullGCCount_lock->notify_all();
1977 }
1978
1979 void G1CollectedHeap::collect(GCCause::Cause cause) {
1980 assert_heap_not_locked();
1981
1982 uint gc_count_before;
1983 uint old_marking_count_before;
1984 uint full_gc_count_before;
1985 bool retry_gc;
1986
1987 do {
1988 retry_gc = false;
|
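The counter handshake above pairs with waiters blocked on FullGCCount_lock: a thread that triggered a GC samples the started count, then waits until _old_marking_cycles_completed catches up. A condensed sketch of that pattern, using the standard library in place of HotSpot's Monitor (names are illustrative):

#include <condition_variable>
#include <mutex>

std::mutex              full_gc_count_lock;
std::condition_variable full_gc_count_cv;
unsigned old_marking_cycles_completed = 0;

// Collector side: a marking cycle or Full GC has finished.
void signal_cycle_completed() {
  std::lock_guard<std::mutex> lg(full_gc_count_lock);
  old_marking_cycles_completed += 1;
  full_gc_count_cv.notify_all();  // wake every thread parked in wait_for_cycle()
}

// Mutator side: block until a cycle newer than count_before has completed.
void wait_for_cycle(unsigned count_before) {
  std::unique_lock<std::mutex> ul(full_gc_count_lock);
  full_gc_count_cv.wait(ul, [&] {
    return old_marking_cycles_completed > count_before;
  });
}

Clearing the CM thread's in_progress flag before the notify_all() matters for the reason the comment gives: a waiter that immediately issues another System.gc() must not incorrectly observe the just-finished marking cycle as still in progress.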
2160 log_warning(gc)("millis_since_last_gc() would return: " JLONG_FORMAT
2161 ". Returning zero instead.", ret_val);
2162 return 0;
2163 }
2164 return ret_val;
2165 }
2166
2167 void G1CollectedHeap::prepare_for_verify() {
2168 _verifier->prepare_for_verify();
2169 }
2170
2171 void G1CollectedHeap::verify(VerifyOption vo) {
2172 _verifier->verify(vo);
2173 }
2174
2175 bool G1CollectedHeap::supports_concurrent_phase_control() const {
2176 return true;
2177 }
2178
2179 const char* const* G1CollectedHeap::concurrent_phases() const {
2180 return _cmThread->concurrent_phases();
2181 }
2182
2183 bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
2184 return _cmThread->request_concurrent_phase(phase);
2185 }
2186
2187 class PrintRegionClosure: public HeapRegionClosure {
2188 outputStream* _st;
2189 public:
2190 PrintRegionClosure(outputStream* st) : _st(st) {}
2191 bool do_heap_region(HeapRegion* r) {
2192 r->print_on(_st);
2193 return false;
2194 }
2195 };
2196
2197 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2198 const HeapRegion* hr,
2199 const VerifyOption vo) const {
2200 switch (vo) {
2201 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2202 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2203 case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
|
2160 log_warning(gc)("millis_since_last_gc() would return: " JLONG_FORMAT
2161 ". Returning zero instead.", ret_val);
2162 return 0;
2163 }
2164 return ret_val;
2165 }
2166
2167 void G1CollectedHeap::prepare_for_verify() {
2168 _verifier->prepare_for_verify();
2169 }
2170
2171 void G1CollectedHeap::verify(VerifyOption vo) {
2172 _verifier->verify(vo);
2173 }
2174
2175 bool G1CollectedHeap::supports_concurrent_phase_control() const {
2176 return true;
2177 }
2178
2179 const char* const* G1CollectedHeap::concurrent_phases() const {
2180 return _cm_thread->concurrent_phases();
2181 }
2182
2183 bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
2184 return _cm_thread->request_concurrent_phase(phase);
2185 }
2186
2187 class PrintRegionClosure: public HeapRegionClosure {
2188 outputStream* _st;
2189 public:
2190 PrintRegionClosure(outputStream* st) : _st(st) {}
2191 bool do_heap_region(HeapRegion* r) {
2192 r->print_on(_st);
2193 return false;
2194 }
2195 };
2196
2197 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2198 const HeapRegion* hr,
2199 const VerifyOption vo) const {
2200 switch (vo) {
2201 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2202 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2203 case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
|
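PrintRegionClosure above follows HeapRegionClosure's iteration protocol: the heap hands each region to do_heap_region(), and a true return value terminates the iteration early, so returning false, as here, visits every region. A minimal standalone sketch of that protocol with illustrative types:

#include <cstdio>
#include <vector>

struct Region { int index; };

struct RegionClosure {
  virtual ~RegionClosure() = default;
  // Return true to abort the iteration, false to continue.
  virtual bool do_region(Region* r) = 0;
};

void region_iterate(std::vector<Region>& regions, RegionClosure* cl) {
  for (Region& r : regions) {
    if (cl->do_region(&r)) {
      return;  // the closure asked to stop early
    }
  }
}

struct PrintClosure : RegionClosure {
  bool do_region(Region* r) override {
    std::printf("region %d\n", r->index);
    return false;  // never abort: print them all
  }
};

int main() {
  std::vector<Region> regions = {{0}, {1}, {2}};
  PrintClosure cl;
  region_iterate(regions, &cl);
  return 0;
}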
2254 }
2255
2256 void G1CollectedHeap::print_extended_on(outputStream* st) const {
2257 print_on(st);
2258
2259 // Print the per-region information.
2260 print_regions_on(st);
2261 }
2262
2263 void G1CollectedHeap::print_on_error(outputStream* st) const {
2264 this->CollectedHeap::print_on_error(st);
2265
2266 if (_cm != NULL) {
2267 st->cr();
2268 _cm->print_on_error(st);
2269 }
2270 }
2271
2272 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2273 workers()->print_worker_threads_on(st);
2274 _cmThread->print_on(st);
2275 st->cr();
2276 _cm->print_worker_threads_on(st);
2277 _cr->print_threads_on(st);
2278 _young_gen_sampling_thread->print_on(st);
2279 if (G1StringDedup::is_enabled()) {
2280 G1StringDedup::print_worker_threads_on(st);
2281 }
2282 }
2283
2284 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2285 workers()->threads_do(tc);
2286 tc->do_thread(_cmThread);
2287 _cm->threads_do(tc);
2288 _cr->threads_do(tc);
2289 tc->do_thread(_young_gen_sampling_thread);
2290 if (G1StringDedup::is_enabled()) {
2291 G1StringDedup::threads_do(tc);
2292 }
2293 }
2294
2295 void G1CollectedHeap::print_tracing_info() const {
2296 g1_rem_set()->print_summary_info();
2297 concurrent_mark()->print_summary_info();
2298 }
2299
2300 #ifndef PRODUCT
2301 // Helpful for debugging RSet issues.
2302
2303 class PrintRSetsClosure : public HeapRegionClosure {
2304 private:
2305 const char* _msg;
|
2254 }
2255
2256 void G1CollectedHeap::print_extended_on(outputStream* st) const {
2257 print_on(st);
2258
2259 // Print the per-region information.
2260 print_regions_on(st);
2261 }
2262
2263 void G1CollectedHeap::print_on_error(outputStream* st) const {
2264 this->CollectedHeap::print_on_error(st);
2265
2266 if (_cm != NULL) {
2267 st->cr();
2268 _cm->print_on_error(st);
2269 }
2270 }
2271
2272 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2273 workers()->print_worker_threads_on(st);
2274 _cm_thread->print_on(st);
2275 st->cr();
2276 _cm->print_worker_threads_on(st);
2277 _cr->print_threads_on(st);
2278 _young_gen_sampling_thread->print_on(st);
2279 if (G1StringDedup::is_enabled()) {
2280 G1StringDedup::print_worker_threads_on(st);
2281 }
2282 }
2283
2284 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2285 workers()->threads_do(tc);
2286 tc->do_thread(_cm_thread);
2287 _cm->threads_do(tc);
2288 _cr->threads_do(tc);
2289 tc->do_thread(_young_gen_sampling_thread);
2290 if (G1StringDedup::is_enabled()) {
2291 G1StringDedup::threads_do(tc);
2292 }
2293 }
2294
2295 void G1CollectedHeap::print_tracing_info() const {
2296 g1_rem_set()->print_summary_info();
2297 concurrent_mark()->print_summary_info();
2298 }
2299
2300 #ifndef PRODUCT
2301 // Helpful for debugging RSet issues.
2302
2303 class PrintRSetsClosure : public HeapRegionClosure {
2304 private:
2305 const char* _msg;
|
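gc_threads_do() above is the same visitor idea applied to threads: callers hand in a ThreadClosure and the heap applies it to every GC thread, so callers never need to know which concurrent threads exist. A small sketch with illustrative types (not HotSpot's Thread or WorkGang classes):

#include <cstdio>
#include <vector>

struct Thread { const char* name; };

struct ThreadClosure {
  virtual ~ThreadClosure() = default;
  virtual void do_thread(Thread* t) = 0;
};

struct GCThreads {
  std::vector<Thread*> workers;
  Thread* cm_thread;
  Thread* sampling_thread;

  // Apply 'tc' to every GC thread, mirroring gc_threads_do() above.
  void threads_do(ThreadClosure* tc) {
    for (Thread* w : workers) tc->do_thread(w);
    tc->do_thread(cm_thread);
    tc->do_thread(sampling_thread);
  }
};

struct PrintNameClosure : ThreadClosure {
  void do_thread(Thread* t) override { std::printf("%s\n", t->name); }
};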
2437 assert_heap_not_locked_and_not_at_safepoint();
2438 VM_G1CollectForAllocation op(word_size,
2439 gc_count_before,
2440 gc_cause,
2441 false, /* should_initiate_conc_mark */
2442 g1_policy()->max_pause_time_ms());
2443 VMThread::execute(&op);
2444
2445 HeapWord* result = op.result();
2446 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2447 assert(result == NULL || ret_succeeded,
2448 "the result should be NULL if the VM did not succeed");
2449 *succeeded = ret_succeeded;
2450
2451 assert_heap_not_locked();
2452 return result;
2453 }
2454
2455 void G1CollectedHeap::do_concurrent_mark() {
2456 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2457 if (!_cmThread->in_progress()) {
2458 _cmThread->set_started();
2459 CGC_lock->notify();
2460 }
2461 }
2462
2463 size_t G1CollectedHeap::pending_card_num() {
2464 size_t extra_cards = 0;
2465 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
2466 DirtyCardQueue& dcq = curr->dirty_card_queue();
2467 extra_cards += dcq.size();
2468 }
2469 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2470 size_t buffer_size = dcqs.buffer_size();
2471 size_t buffer_num = dcqs.completed_buffers_num();
2472
2473 return buffer_size * buffer_num + extra_cards;
2474 }
2475
2476 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2477 // We don't nominate objects with many remembered set entries, on
|
2437 assert_heap_not_locked_and_not_at_safepoint();
2438 VM_G1CollectForAllocation op(word_size,
2439 gc_count_before,
2440 gc_cause,
2441 false, /* should_initiate_conc_mark */
2442 g1_policy()->max_pause_time_ms());
2443 VMThread::execute(&op);
2444
2445 HeapWord* result = op.result();
2446 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2447 assert(result == NULL || ret_succeeded,
2448 "the result should be NULL if the VM did not succeed");
2449 *succeeded = ret_succeeded;
2450
2451 assert_heap_not_locked();
2452 return result;
2453 }
2454
2455 void G1CollectedHeap::do_concurrent_mark() {
2456 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2457 if (!_cm_thread->in_progress()) {
2458 _cm_thread->set_started();
2459 CGC_lock->notify();
2460 }
2461 }
2462
2463 size_t G1CollectedHeap::pending_card_num() {
2464 size_t extra_cards = 0;
2465 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
2466 DirtyCardQueue& dcq = curr->dirty_card_queue();
2467 extra_cards += dcq.size();
2468 }
2469 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2470 size_t buffer_size = dcqs.buffer_size();
2471 size_t buffer_num = dcqs.completed_buffers_num();
2472
2473 return buffer_size * buffer_num + extra_cards;
2474 }
2475
2476 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2477 // We don't nominate objects with many remembered set entries, on
|
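The accounting in pending_card_num() above is worth spelling out: completed buffers all share one capacity, so their cards total buffer_size * completed_buffers_num, and each Java thread may additionally hold a partially filled buffer whose current size is added on top. A back-of-the-envelope sketch of the same sum with illustrative types:

#include <cstddef>
#include <vector>

struct DirtyCardQueue {
  size_t size;  // cards sitting in this thread's not-yet-completed buffer
};

size_t pending_card_num(const std::vector<DirtyCardQueue>& thread_queues,
                        size_t buffer_size,            // cards per full buffer
                        size_t completed_buffer_num) { // full buffers handed in
  size_t extra_cards = 0;
  for (const DirtyCardQueue& dcq : thread_queues) {
    extra_cards += dcq.size;
  }
  return buffer_size * completed_buffer_num + extra_cards;
}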
2734 _gc_timer_stw->register_gc_start();
2735
2736 GCIdMark gc_id_mark;
2737 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
2738
2739 SvcGCMarker sgcm(SvcGCMarker::MINOR);
2740 ResourceMark rm;
2741
2742 g1_policy()->note_gc_start();
2743
2744 wait_for_root_region_scanning();
2745
2746 print_heap_before_gc();
2747 print_heap_regions();
2748 trace_heap_before_gc(_gc_tracer_stw);
2749
2750 _verifier->verify_region_sets_optional();
2751 _verifier->verify_dirty_young_regions();
2752
2753 // We should not be doing initial mark unless the conc mark thread is running
2754 if (!_cmThread->should_terminate()) {
2755 // This call will decide whether this pause is an initial-mark
2756 // pause. If it is, in_initial_mark_gc() will return true
2757 // for the duration of this pause.
2758 g1_policy()->decide_on_conc_mark_initiation();
2759 }
2760
2761 // We do not allow initial-mark to be piggy-backed on a mixed GC.
2762 assert(!collector_state()->in_initial_mark_gc() ||
2763 collector_state()->in_young_only_phase(), "sanity");
2764
2765 // We also do not allow mixed GCs during marking.
2766 assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
2767
2768 // Record whether this pause is an initial mark. When the current
2769 // thread has completed its logging output and it's safe to signal
2770 // the CM thread, the flag's value in the policy has been reset.
2771 bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
2772
2773 // Inner scope for scope based logging, timers, and stats collection
|
2734 _gc_timer_stw->register_gc_start();
2735
2736 GCIdMark gc_id_mark;
2737 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
2738
2739 SvcGCMarker sgcm(SvcGCMarker::MINOR);
2740 ResourceMark rm;
2741
2742 g1_policy()->note_gc_start();
2743
2744 wait_for_root_region_scanning();
2745
2746 print_heap_before_gc();
2747 print_heap_regions();
2748 trace_heap_before_gc(_gc_tracer_stw);
2749
2750 _verifier->verify_region_sets_optional();
2751 _verifier->verify_dirty_young_regions();
2752
2753 // We should not be doing initial mark unless the conc mark thread is running
2754 if (!_cm_thread->should_terminate()) {
2755 // This call will decide whether this pause is an initial-mark
2756 // pause. If it is, in_initial_mark_gc() will return true
2757 // for the duration of this pause.
2758 g1_policy()->decide_on_conc_mark_initiation();
2759 }
2760
2761 // We do not allow initial-mark to be piggy-backed on a mixed GC.
2762 assert(!collector_state()->in_initial_mark_gc() ||
2763 collector_state()->in_young_only_phase(), "sanity");
2764
2765 // We also do not allow mixed GCs during marking.
2766 assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
2767
2768 // Record whether this pause is an initial mark. When the current
2769 // thread has completed its logging output and it's safe to signal
2770 // the CM thread, the flag's value in the policy has been reset.
2771 bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
2772
2773 // Inner scope for scope based logging, timers, and stats collection
|
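The two sanity asserts above encode a pair of collector-state invariants: an initial-mark pause may only piggy-back on a young-only pause, and while marking or remembered-set rebuilding is in progress no mixed GC may start. A compact restatement with illustrative field names:

#include <cassert>

struct CollectorState {
  bool in_initial_mark_gc;
  bool mark_or_rebuild_in_progress;
  bool in_young_only_phase;
};

void check_pause_invariants(const CollectorState& s) {
  // Initial mark is only ever piggy-backed on a young-only pause.
  assert(!s.in_initial_mark_gc || s.in_young_only_phase);
  // No mixed GCs while marking or rebuilding remembered sets.
  assert(!s.mark_or_rebuild_in_progress || s.in_young_only_phase);
}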