1463
1464 if (SafepointSynchronize::is_at_safepoint()) {
1465 guarantee(Thread::current()->is_VM_thread() ||
1466 OldSets_lock->owned_by_self(),
1467 "master humongous set MT safety protocol at a safepoint");
1468 } else {
1469 guarantee(Heap_lock->owned_by_self(),
1470 "master humongous set MT safety protocol outside a safepoint");
1471 }
1472 }
// A region belongs in this set only if it is humongous.
bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
// Human-readable name of this region set, used in logging/verification output.
const char* get_description() { return "Humongous Regions"; }
1475 };
1476
1477 G1CollectedHeap::G1CollectedHeap() :
1478 CollectedHeap(),
1479 _young_gen_sampling_thread(NULL),
1480 _workers(NULL),
1481 _card_table(NULL),
1482 _soft_ref_policy(),
1483 _old_set("Old Region Set", new OldRegionSetChecker()),
1484 _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1485 _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1486 _bot(NULL),
1487 _listener(),
1488 _numa(G1NUMA::create()),
1489 _hrm(NULL),
1490 _allocator(NULL),
1491 _verifier(NULL),
1492 _summary_bytes_used(0),
1493 _bytes_used_during_gc(0),
1494 _archive_allocator(NULL),
1495 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1496 _old_evac_stats("Old", OldPLABSize, PLABWeight),
1497 _expand_heap_after_alloc_failure(true),
1498 _g1mm(NULL),
1499 _humongous_reclaim_candidates(),
1500 _has_humongous_reclaim_candidates(false),
1501 _hr_printer(),
1502 _collector_state(),
2414 // must be equal to the humongous object limit.
// Maximum TLAB size in words, aligned down to MinObjAlignment. Per the
// comment above, this equals the humongous object threshold, so a TLAB can
// never itself become a humongous allocation.
size_t G1CollectedHeap::max_tlab_size() const {
  return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
}
2418
// Upper bound on the size of a new TLAB allocation; the computation is
// delegated to the G1 allocator. The Thread argument is unused here
// (hence "ignored").
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _allocator->unsafe_max_tlab_alloc();
}
2422
// Maximum heap capacity in bytes: the number of regions the region manager
// can still expand to, times the region size.
size_t G1CollectedHeap::max_capacity() const {
  return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
}
2426
// Total reserved heap capacity in bytes: the absolute maximum region count
// times the region size.
size_t G1CollectedHeap::max_reserved_capacity() const {
  return _hrm->max_length() * HeapRegion::GrainBytes;
}
2430
2431 jlong G1CollectedHeap::millis_since_last_gc() {
2432 // See the notes in GenCollectedHeap::millis_since_last_gc()
2433 // for more information about the implementation.
2434 jlong ret_val = (os::javaTimeNanos() - _policy->time_of_last_gc()) /
2435 NANOSECS_PER_MILLISEC;
2436 if (ret_val < 0) {
2437 NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
2438 return 0;
2439 }
2440 return ret_val;
2441 }
2442
2443 void G1CollectedHeap::deduplicate_string(oop str) {
2444 assert(java_lang_String::is_instance(str), "invariant");
2445
2446 if (G1StringDedup::is_enabled()) {
2447 G1StringDedup::deduplicate(str);
2448 }
2449 }
2450
// Delegate pre-verification setup to the heap verifier.
void G1CollectedHeap::prepare_for_verify() {
  _verifier->prepare_for_verify();
}
2454
2709 rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2710
2711 // FIXME: what is this about?
2712 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2713 // is set.
2714 #if COMPILER2_OR_JVMCI
2715 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2716 #endif
2717
2718 double start = os::elapsedTime();
2719 resize_all_tlabs();
2720 phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2721
2722 MemoryService::track_memory_usage();
2723 // We have just completed a GC. Update the soft reference
2724 // policy with the new heap occupancy
2725 Universe::update_heap_info_at_gc();
2726
2727 // Print NUMA statistics.
2728 _numa->print_statistics();
2729 }
2730
2731 void G1CollectedHeap::verify_numa_regions(const char* desc) {
2732 LogTarget(Trace, gc, heap, verify) lt;
2733
2734 if (lt.is_enabled()) {
2735 LogStream ls(lt);
2736 // Iterate all heap regions to print matching between preferred numa id and actual numa id.
2737 G1NodeIndexCheckClosure cl(desc, _numa, &ls);
2738 heap_region_iterate(&cl);
2739 }
2740 }
2741
2742 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2743 uint gc_count_before,
2744 bool* succeeded,
2745 GCCause::Cause gc_cause) {
2746 assert_heap_not_locked_and_not_at_safepoint();
2747 VM_G1CollectForAllocation op(word_size,
2748 gc_count_before,
3103
3104 if (should_start_conc_mark) {
3105 // We have to do this before we notify the CM threads that
3106 // they can start working to make sure that all the
3107 // appropriate initialization is done on the CM object.
3108 concurrent_mark()->post_initial_mark();
3109 // Note that we don't actually trigger the CM thread at
3110 // this point. We do that later when we're sure that
3111 // the current thread has completed its logging output.
3112 }
3113
3114 allocate_dummy_regions();
3115
3116 _allocator->init_mutator_alloc_regions();
3117
3118 expand_heap_after_young_collection();
3119
3120 double sample_end_time_sec = os::elapsedTime();
3121 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3122 policy()->record_collection_pause_end(pause_time_ms);
3123 }
3124
3125 verify_after_young_collection(verify_type);
3126
3127 gc_epilogue(false);
3128 }
3129
3130 // Print the remainder of the GC log output.
3131 if (evacuation_failed()) {
3132 log_info(gc)("To-space exhausted");
3133 }
3134
3135 policy()->print_phases();
3136 heap_transition.print();
3137
3138 _hrm->verify_optional();
3139 _verifier->verify_region_sets_optional();
3140
3141 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3142 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
|
1463
1464 if (SafepointSynchronize::is_at_safepoint()) {
1465 guarantee(Thread::current()->is_VM_thread() ||
1466 OldSets_lock->owned_by_self(),
1467 "master humongous set MT safety protocol at a safepoint");
1468 } else {
1469 guarantee(Heap_lock->owned_by_self(),
1470 "master humongous set MT safety protocol outside a safepoint");
1471 }
1472 }
// A region belongs in this set only if it is humongous.
bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
// Human-readable name of this region set, used in logging/verification output.
const char* get_description() { return "Humongous Regions"; }
1475 };
1476
1477 G1CollectedHeap::G1CollectedHeap() :
1478 CollectedHeap(),
1479 _young_gen_sampling_thread(NULL),
1480 _workers(NULL),
1481 _card_table(NULL),
1482 _soft_ref_policy(),
1483 _collection_pause_end(Ticks::now()),
1484 _time_of_last_gc_ns(os::javaTimeNanos()),
1485 _old_set("Old Region Set", new OldRegionSetChecker()),
1486 _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1487 _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1488 _bot(NULL),
1489 _listener(),
1490 _numa(G1NUMA::create()),
1491 _hrm(NULL),
1492 _allocator(NULL),
1493 _verifier(NULL),
1494 _summary_bytes_used(0),
1495 _bytes_used_during_gc(0),
1496 _archive_allocator(NULL),
1497 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1498 _old_evac_stats("Old", OldPLABSize, PLABWeight),
1499 _expand_heap_after_alloc_failure(true),
1500 _g1mm(NULL),
1501 _humongous_reclaim_candidates(),
1502 _has_humongous_reclaim_candidates(false),
1503 _hr_printer(),
1504 _collector_state(),
2416 // must be equal to the humongous object limit.
// Maximum TLAB size in words, aligned down to MinObjAlignment. Per the
// comment above, this equals the humongous object threshold, so a TLAB can
// never itself become a humongous allocation.
size_t G1CollectedHeap::max_tlab_size() const {
  return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
}
2420
// Upper bound on the size of a new TLAB allocation; the computation is
// delegated to the G1 allocator. The Thread argument is unused here
// (hence "ignored").
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _allocator->unsafe_max_tlab_alloc();
}
2424
// Maximum heap capacity in bytes: the number of regions the region manager
// can still expand to, times the region size.
size_t G1CollectedHeap::max_capacity() const {
  return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
}
2428
// Total reserved heap capacity in bytes: the absolute maximum region count
// times the region size.
size_t G1CollectedHeap::max_reserved_capacity() const {
  return _hrm->max_length() * HeapRegion::GrainBytes;
}
2432
2433 jlong G1CollectedHeap::millis_since_last_gc() {
2434 // See the notes in GenCollectedHeap::millis_since_last_gc()
2435 // for more information about the implementation.
2436 jlong ret_val = (os::javaTimeNanos() - _time_of_last_gc_ns) /
2437 NANOSECS_PER_MILLISEC;
2438 if (ret_val < 0) {
2439 NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
2440 return 0;
2441 }
2442 return ret_val;
2443 }
2444
2445 void G1CollectedHeap::deduplicate_string(oop str) {
2446 assert(java_lang_String::is_instance(str), "invariant");
2447
2448 if (G1StringDedup::is_enabled()) {
2449 G1StringDedup::deduplicate(str);
2450 }
2451 }
2452
// Delegate pre-verification setup to the heap verifier.
void G1CollectedHeap::prepare_for_verify() {
  _verifier->prepare_for_verify();
}
2456
2711 rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2712
2713 // FIXME: what is this about?
2714 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2715 // is set.
2716 #if COMPILER2_OR_JVMCI
2717 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2718 #endif
2719
2720 double start = os::elapsedTime();
2721 resize_all_tlabs();
2722 phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2723
2724 MemoryService::track_memory_usage();
2725 // We have just completed a GC. Update the soft reference
2726 // policy with the new heap occupancy
2727 Universe::update_heap_info_at_gc();
2728
2729 // Print NUMA statistics.
2730 _numa->print_statistics();
2731
2732 _collection_pause_end = Ticks::now();
2733 }
2734
2735 void G1CollectedHeap::verify_numa_regions(const char* desc) {
2736 LogTarget(Trace, gc, heap, verify) lt;
2737
2738 if (lt.is_enabled()) {
2739 LogStream ls(lt);
2740 // Iterate all heap regions to print matching between preferred numa id and actual numa id.
2741 G1NodeIndexCheckClosure cl(desc, _numa, &ls);
2742 heap_region_iterate(&cl);
2743 }
2744 }
2745
2746 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2747 uint gc_count_before,
2748 bool* succeeded,
2749 GCCause::Cause gc_cause) {
2750 assert_heap_not_locked_and_not_at_safepoint();
2751 VM_G1CollectForAllocation op(word_size,
2752 gc_count_before,
3107
3108 if (should_start_conc_mark) {
3109 // We have to do this before we notify the CM threads that
3110 // they can start working to make sure that all the
3111 // appropriate initialization is done on the CM object.
3112 concurrent_mark()->post_initial_mark();
3113 // Note that we don't actually trigger the CM thread at
3114 // this point. We do that later when we're sure that
3115 // the current thread has completed its logging output.
3116 }
3117
3118 allocate_dummy_regions();
3119
3120 _allocator->init_mutator_alloc_regions();
3121
3122 expand_heap_after_young_collection();
3123
3124 double sample_end_time_sec = os::elapsedTime();
3125 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3126 policy()->record_collection_pause_end(pause_time_ms);
3127
3128 _time_of_last_gc_ns = os::javaTimeNanos();
3129 }
3130
3131 verify_after_young_collection(verify_type);
3132
3133 gc_epilogue(false);
3134 }
3135
3136 // Print the remainder of the GC log output.
3137 if (evacuation_failed()) {
3138 log_info(gc)("To-space exhausted");
3139 }
3140
3141 policy()->print_phases();
3142 heap_transition.print();
3143
3144 _hrm->verify_optional();
3145 _verifier->verify_region_sets_optional();
3146
3147 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3148 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
|