73 GCH_PS_jvmti_oops_do,
74 GCH_PS_CodeCache_oops_do,
75 GCH_PS_younger_gens,
76 // Leave this one last.
77 GCH_PS_NumElements
78 };
79
// Construct a two-generation collected heap driven by the given policy.
// _rem_set starts out NULL (set up later, outside this constructor's view);
// the SubTasksDone instance is sized by the GCH_PS_* task enum so parallel
// workers can claim strong-root tasks by index.
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _rem_set(NULL),
  _gen_policy(policy),
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
}
89
90 jint GenCollectedHeap::initialize() {
91 CollectedHeap::pre_initialize();
92
93 _n_gens = gen_policy()->number_of_generations();
94 assert(_n_gens == 2, "There is no support for more than two generations");
95
96 // While there are no constraints in the GC code that HeapWordSize
97 // be any particular value, there are multiple other areas in the
98 // system which believe this to be true (e.g. oop->object_size in some
99 // cases incorrectly returns the size in wordSize units rather than
100 // HeapWordSize).
101 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
102
103 // Allocate space for the heap.
104
105 char* heap_address;
106 ReservedSpace heap_rs;
107
108 size_t heap_alignment = collector_policy()->heap_alignment();
109
110 heap_address = allocate(heap_alignment, &heap_rs);
111
112 if (!heap_rs.is_reserved()) {
113 vm_shutdown_during_initialization(
114 "Could not reserve enough space for object heap");
115 return JNI_ENOMEM;
183 def_new_gen->from()->capacity());
184 policy->initialize_gc_policy_counters();
185 }
186
// Initialize reference processing: shared state first, then the
// per-generation reference processors (young before old).
void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}
192
193 size_t GenCollectedHeap::capacity() const {
194 return _young_gen->capacity() + _old_gen->capacity();
195 }
196
197 size_t GenCollectedHeap::used() const {
198 return _young_gen->used() + _old_gen->used();
199 }
200
// Save the "used_region" for generations level and lower.
void GenCollectedHeap::save_used_regions(int level) {
  assert(level >= 0, "Illegal level parameter");
  assert(level < _n_gens, "Illegal level parameter");
  // Level 1 is the old generation; saving at that level implies saving
  // the young generation's region as well (done unconditionally below).
  if (level == 1) {
    _old_gen->save_used_region();
  }
  _young_gen->save_used_region();
}
210
211 size_t GenCollectedHeap::max_capacity() const {
212 return _young_gen->max_capacity() + _old_gen->max_capacity();
213 }
214
// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  // FullGCCount_lock guards the counter; no safepoint check since this
  // runs at (or adjacent to) a safepoint.
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  // Wake up any threads waiting on FullGCCount_lock for this counter
  // to advance.
  ml.notify_all();
  return _full_collections_completed;
}
400 gclog_or_tty->print(":");
401 gen->print_heap_change(prev_used);
402 }
403 }
404
405 void GenCollectedHeap::do_collection(bool full,
406 bool clear_all_soft_refs,
407 size_t size,
408 bool is_tlab,
409 int max_level) {
410 ResourceMark rm;
411 DEBUG_ONLY(Thread* my_thread = Thread::current();)
412
413 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
414 assert(my_thread->is_VM_thread() ||
415 my_thread->is_ConcurrentGC_thread(),
416 "incorrect thread type capability");
417 assert(Heap_lock->is_locked(),
418 "the requesting thread should have the Heap_lock");
419 guarantee(!is_gc_active(), "collection is not reentrant");
420 assert(max_level < n_gens(), "sanity check");
421
422 if (GC_locker::check_active_before_gc()) {
423 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
424 }
425
426 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
427 collector_policy()->should_clear_all_soft_refs();
428
429 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
430
431 const size_t metadata_prev_used = MetaspaceAux::used_bytes();
432
433 print_heap_before_gc();
434
435 {
436 FlagSetting fl(_is_gc_active, true);
437
438 bool complete = full && (max_level == (n_gens()-1));
439 const char* gc_cause_prefix = complete ? "Full GC" : "GC";
440 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
441 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
442 // so we can assume here that the next GC id is what we want.
443 GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
444
445 gc_prologue(complete);
446 increment_total_collections(complete);
447
448 size_t gch_prev_used = used();
449 bool run_verification = total_collections() >= VerifyGCStartAt;
450
451 bool prepared_for_verification = false;
452 int max_level_collected = 0;
453 bool old_collects_young = (max_level == 1) &&
454 full &&
455 _old_gen->full_collects_younger_generations();
456 if (!old_collects_young &&
457 _young_gen->should_collect(full, size, is_tlab)) {
458 if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
490 VerifyGCLevel <= 1 && VerifyBeforeGC) {
491 prepare_for_verify();
492 }
493
494 assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
495 collect_generation(_old_gen,
496 full,
497 size,
498 is_tlab,
499 run_verification && VerifyGCLevel <= 1,
500 do_clear_all_soft_refs,
501 true);
502
503 must_restore_marks_for_biased_locking = true;
504 max_level_collected = 1;
505 }
506
507 // Update "complete" boolean wrt what actually transpired --
508 // for instance, a promotion failure could have led to
509 // a whole heap collection.
510 complete = complete || (max_level_collected == n_gens() - 1);
511
512 if (complete) { // We did a "major" collection
513 // FIXME: See comment at pre_full_gc_dump call
514 post_full_gc_dump(NULL); // do any post full gc dumps
515 }
516
517 if (PrintGCDetails) {
518 print_heap_change(gch_prev_used);
519
520 // Print metaspace info for full GC with PrintGCDetails flag.
521 if (complete) {
522 MetaspaceAux::print_metaspace_change(metadata_prev_used);
523 }
524 }
525
526 // Adjust generation sizes.
527 if (max_level_collected == 1) {
528 _old_gen->compute_new_size();
529 }
530 _young_gen->compute_new_size();
531
532 if (complete) {
533 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
534 ClassLoaderDataGraph::purge();
535 MetaspaceAux::verify_metrics();
536 // Resize the metaspace capacity after full collections
537 MetaspaceGC::compute_new_size();
538 update_full_collections_completed();
539 }
540
541 // Track memory usage and detect low memory after GC finishes
542 MemoryService::track_memory_usage();
543
544 gc_epilogue(complete);
545
546 if (must_restore_marks_for_biased_locking) {
547 BiasedLocking::restore_marks();
754 HeapWord** GenCollectedHeap::top_addr() const {
755 return _young_gen->top_addr();
756 }
757
758 HeapWord** GenCollectedHeap::end_addr() const {
759 return _young_gen->end_addr();
760 }
761
762 // public collection interfaces
763
// Entry point for a collection with no explicit level: routes between a
// mostly-concurrent full GC, a WhiteBox-requested young GC, a debug-only
// scavenge-alot young GC, and a stop-the-world full collection.
void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // minor collection for WhiteBox API
    collect(cause, 0);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}
790
// Acquire the Heap_lock on behalf of the caller, then collect up to
// "max_level" while holding it.
void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}
797
// Collect all generations (up to the oldest); Heap_lock must already be held.
void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}
803
804 // this is the private collection interface
805 // The Heap_lock is expected to be held on entry.
806
void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
    // The count snapshots are presumably used by the VM operation to detect
    // a GC that already completed in the window before it runs -- confirm
    // against VM_GenCollectFull.
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}
818
819 #if INCLUDE_ALL_GCS
820 bool GenCollectedHeap::create_cms_collector() {
821
837 }
838 return true; // success
839 }
840
// Kick off a mostly-concurrent full collection: snapshot the GC counts
// under the Heap_lock, then release it and hand the request to the
// VM thread.
void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
854 #endif // INCLUDE_ALL_GCS
855
// Full collection of every generation (up to the oldest level).
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, _n_gens - 1);
}
859
// Full collection up to "max_level". When the GC was triggered by the GC
// locker and a young collection is still expected to succeed, try a
// young-only collection first; retry with the old generation included if
// that turns out to be insufficient.
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}
892
// Returns true iff "p" lies below the start of the old generation's
// reserved space, i.e. in the young generation. The assert cross-checks
// this address comparison against the young gen's own reserved range.
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
  return result;
}
899
900 // Returns "TRUE" iff "p" points into the committed areas of the heap.
901 bool GenCollectedHeap::is_in(const void* p) const {
902 #ifndef ASSERT
903 guarantee(VerifyBeforeGC ||
904 VerifyDuringGC ||
905 VerifyBeforeExit ||
906 VerifyDuringStartup ||
907 PrintAssembly ||
908 tty->count() != 0 || // already printing
909 VerifyAfterGC ||
// Apply "cl" to the spaces of both generations, young first. The boolean
// forwarded to each generation is a flag whose meaning is defined by
// Generation::space_iterate -- confirm there.
void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  _young_gen->space_iterate(cl, true);
  _old_gen->space_iterate(cl, true);
}
1099
1100 bool GenCollectedHeap::is_maximal_no_gc() const {
1101 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1102 }
1103
// Ask each generation to save its current marks (young first, then old).
void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}
1108
// Accessor for the singleton heap instance; asserts that it has been
// initialized and really is a generational heap.
GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}
1114
1115
1116 void GenCollectedHeap::prepare_for_compaction() {
1117 guarantee(_n_gens = 2, "Wrong number of generations");
1118 // Start by compacting into same gen.
1119 CompactPoint cp(_old_gen);
1120 _old_gen->prepare_for_compaction(&cp);
1121 _young_gen->prepare_for_compaction(&cp);
1122 }
1123
1124 GCStats* GenCollectedHeap::gc_stats(int level) const {
1125 if (level == 0) {
1126 return _young_gen->gc_stats();
1127 } else {
1128 return _old_gen->gc_stats();
1129 }
1130 }
1131
1132 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1133 if (!silent) {
1134 gclog_or_tty->print("%s", _old_gen->name());
1135 gclog_or_tty->print(" ");
1136 }
1137 _old_gen->verify();
|
73 GCH_PS_jvmti_oops_do,
74 GCH_PS_CodeCache_oops_do,
75 GCH_PS_younger_gens,
76 // Leave this one last.
77 GCH_PS_NumElements
78 };
79
// Construct a two-generation collected heap driven by the given policy.
// _rem_set starts out NULL (set up later, outside this constructor's view);
// the SubTasksDone instance is sized by the GCH_PS_* task enum so parallel
// workers can claim strong-root tasks by index.
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _rem_set(NULL),
  _gen_policy(policy),
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
}
89
90 jint GenCollectedHeap::initialize() {
91 CollectedHeap::pre_initialize();
92
93 // While there are no constraints in the GC code that HeapWordSize
94 // be any particular value, there are multiple other areas in the
95 // system which believe this to be true (e.g. oop->object_size in some
96 // cases incorrectly returns the size in wordSize units rather than
97 // HeapWordSize).
98 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
99
100 // Allocate space for the heap.
101
102 char* heap_address;
103 ReservedSpace heap_rs;
104
105 size_t heap_alignment = collector_policy()->heap_alignment();
106
107 heap_address = allocate(heap_alignment, &heap_rs);
108
109 if (!heap_rs.is_reserved()) {
110 vm_shutdown_during_initialization(
111 "Could not reserve enough space for object heap");
112 return JNI_ENOMEM;
180 def_new_gen->from()->capacity());
181 policy->initialize_gc_policy_counters();
182 }
183
// Initialize reference processing: shared state first, then the
// per-generation reference processors (young before old).
void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}
189
190 size_t GenCollectedHeap::capacity() const {
191 return _young_gen->capacity() + _old_gen->capacity();
192 }
193
194 size_t GenCollectedHeap::used() const {
195 return _young_gen->used() + _old_gen->used();
196 }
197
// Save the "used_region" for generations level and lower.
void GenCollectedHeap::save_used_regions(int level) {
  assert(level == 0 || level == 1, "Illegal level parameter");
  // Level 1 is the old generation; saving at that level implies saving
  // the young generation's region as well (done unconditionally below).
  if (level == 1) {
    _old_gen->save_used_region();
  }
  _young_gen->save_used_region();
}
206
207 size_t GenCollectedHeap::max_capacity() const {
208 return _young_gen->max_capacity() + _old_gen->max_capacity();
209 }
210
// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  // FullGCCount_lock guards the counter; no safepoint check since this
  // runs at (or adjacent to) a safepoint.
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  // Wake up any threads waiting on FullGCCount_lock for this counter
  // to advance.
  ml.notify_all();
  return _full_collections_completed;
}
396 gclog_or_tty->print(":");
397 gen->print_heap_change(prev_used);
398 }
399 }
400
401 void GenCollectedHeap::do_collection(bool full,
402 bool clear_all_soft_refs,
403 size_t size,
404 bool is_tlab,
405 int max_level) {
406 ResourceMark rm;
407 DEBUG_ONLY(Thread* my_thread = Thread::current();)
408
409 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
410 assert(my_thread->is_VM_thread() ||
411 my_thread->is_ConcurrentGC_thread(),
412 "incorrect thread type capability");
413 assert(Heap_lock->is_locked(),
414 "the requesting thread should have the Heap_lock");
415 guarantee(!is_gc_active(), "collection is not reentrant");
416
417 if (GC_locker::check_active_before_gc()) {
418 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
419 }
420
421 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
422 collector_policy()->should_clear_all_soft_refs();
423
424 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
425
426 const size_t metadata_prev_used = MetaspaceAux::used_bytes();
427
428 print_heap_before_gc();
429
430 {
431 FlagSetting fl(_is_gc_active, true);
432
433 bool complete = full && (max_level == 1 /* old */);
434 const char* gc_cause_prefix = complete ? "Full GC" : "GC";
435 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
436 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
437 // so we can assume here that the next GC id is what we want.
438 GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
439
440 gc_prologue(complete);
441 increment_total_collections(complete);
442
443 size_t gch_prev_used = used();
444 bool run_verification = total_collections() >= VerifyGCStartAt;
445
446 bool prepared_for_verification = false;
447 int max_level_collected = 0;
448 bool old_collects_young = (max_level == 1) &&
449 full &&
450 _old_gen->full_collects_younger_generations();
451 if (!old_collects_young &&
452 _young_gen->should_collect(full, size, is_tlab)) {
453 if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
485 VerifyGCLevel <= 1 && VerifyBeforeGC) {
486 prepare_for_verify();
487 }
488
489 assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
490 collect_generation(_old_gen,
491 full,
492 size,
493 is_tlab,
494 run_verification && VerifyGCLevel <= 1,
495 do_clear_all_soft_refs,
496 true);
497
498 must_restore_marks_for_biased_locking = true;
499 max_level_collected = 1;
500 }
501
502 // Update "complete" boolean wrt what actually transpired --
503 // for instance, a promotion failure could have led to
504 // a whole heap collection.
505 complete = complete || (max_level_collected == 1 /* old */);
506
507 if (complete) { // We did a "major" collection
508 // FIXME: See comment at pre_full_gc_dump call
509 post_full_gc_dump(NULL); // do any post full gc dumps
510 }
511
512 if (PrintGCDetails) {
513 print_heap_change(gch_prev_used);
514
515 // Print metaspace info for full GC with PrintGCDetails flag.
516 if (complete) {
517 MetaspaceAux::print_metaspace_change(metadata_prev_used);
518 }
519 }
520
521 // Adjust generation sizes.
522 if (max_level_collected == 1 /* old */) {
523 _old_gen->compute_new_size();
524 }
525 _young_gen->compute_new_size();
526
527 if (complete) {
528 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
529 ClassLoaderDataGraph::purge();
530 MetaspaceAux::verify_metrics();
531 // Resize the metaspace capacity after full collections
532 MetaspaceGC::compute_new_size();
533 update_full_collections_completed();
534 }
535
536 // Track memory usage and detect low memory after GC finishes
537 MemoryService::track_memory_usage();
538
539 gc_epilogue(complete);
540
541 if (must_restore_marks_for_biased_locking) {
542 BiasedLocking::restore_marks();
749 HeapWord** GenCollectedHeap::top_addr() const {
750 return _young_gen->top_addr();
751 }
752
753 HeapWord** GenCollectedHeap::end_addr() const {
754 return _young_gen->end_addr();
755 }
756
757 // public collection interfaces
758
// Entry point for a collection with no explicit level: routes between a
// mostly-concurrent full GC, a WhiteBox-requested young GC, a debug-only
// scavenge-alot young GC, and a stop-the-world full collection.
void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // minor collection for WhiteBox API
    collect(cause, 0 /* young */);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0 /* young */);
    } else {
      // Stop-the-world full collection
      collect(cause, 1 /* old */);
    }
#else
    // Stop-the-world full collection
    collect(cause, 1 /* old */);
#endif
  }
}
785
// Acquire the Heap_lock on behalf of the caller, then collect up to
// "max_level" while holding it.
void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}
792
// Collect all generations (up to the old gen); Heap_lock must already be held.
void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, 1 /* old */);
}
798
799 // this is the private collection interface
800 // The Heap_lock is expected to be held on entry.
801
void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
    // The count snapshots are presumably used by the VM operation to detect
    // a GC that already completed in the window before it runs -- confirm
    // against VM_GenCollectFull.
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}
813
814 #if INCLUDE_ALL_GCS
815 bool GenCollectedHeap::create_cms_collector() {
816
832 }
833 return true; // success
834 }
835
// Kick off a mostly-concurrent full collection: snapshot the GC counts
// under the Heap_lock, then release it and hand the request to the
// VM thread.
void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
849 #endif // INCLUDE_ALL_GCS
850
// Full collection of every generation (up through the old gen).
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, 1 /* old */);
}
854
// Full collection up to "max_level". When the GC was triggered by the GC
// locker and a young collection is still expected to succeed, try a
// young-only collection first; retry with the old generation included if
// that turns out to be insufficient.
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  1 /* old */          /* max_level */);
  }
}
887
// Returns true iff "p" lies below the start of the old generation's
// reserved space, i.e. in the young generation. The assert cross-checks
// this address comparison against the young gen's own reserved range.
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
  return result;
}
894
895 // Returns "TRUE" iff "p" points into the committed areas of the heap.
896 bool GenCollectedHeap::is_in(const void* p) const {
897 #ifndef ASSERT
898 guarantee(VerifyBeforeGC ||
899 VerifyDuringGC ||
900 VerifyBeforeExit ||
901 VerifyDuringStartup ||
902 PrintAssembly ||
903 tty->count() != 0 || // already printing
904 VerifyAfterGC ||
// Apply "cl" to the spaces of both generations, young first. The boolean
// forwarded to each generation is a flag whose meaning is defined by
// Generation::space_iterate -- confirm there.
void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  _young_gen->space_iterate(cl, true);
  _old_gen->space_iterate(cl, true);
}
1094
1095 bool GenCollectedHeap::is_maximal_no_gc() const {
1096 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1097 }
1098
// Ask each generation to save its current marks (young first, then old).
void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}
1103
// Accessor for the singleton heap instance; asserts that it has been
// initialized and really is a generational heap.
GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}
1109
// Prepare both generations for compaction. The same CompactPoint is
// threaded through both calls so compaction destinations can flow from
// the old generation to the young one -- confirm against
// Generation::prepare_for_compaction.
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
1116
1117 GCStats* GenCollectedHeap::gc_stats(int level) const {
1118 if (level == 0) {
1119 return _young_gen->gc_stats();
1120 } else {
1121 return _old_gen->gc_stats();
1122 }
1123 }
1124
1125 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1126 if (!silent) {
1127 gclog_or_tty->print("%s", _old_gen->name());
1128 gclog_or_tty->print(" ");
1129 }
1130 _old_gen->verify();
|