48 #include "memory/filemap.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "oops/oop.inline.hpp"
51 #include "runtime/biasedLocking.hpp"
52 #include "runtime/handles.hpp"
53 #include "runtime/handles.inline.hpp"
54 #include "runtime/java.hpp"
55 #include "runtime/vmThread.hpp"
56 #include "services/management.hpp"
57 #include "services/memoryService.hpp"
58 #include "utilities/debug.hpp"
59 #include "utilities/formatBuffer.hpp"
60 #include "utilities/macros.hpp"
61 #include "utilities/stack.inline.hpp"
62 #include "utilities/vmError.hpp"
63
64 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
65 CollectedHeap(),
66 _rem_set(NULL),
67 _gen_policy(policy),
68 _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
69 _full_collections_completed(0)
70 {
71 assert(policy != NULL, "Sanity check");
72 }
73
74 jint GenCollectedHeap::initialize() {
75 // While there are no constraints in the GC code that HeapWordSize
76 // be any particular value, there are multiple other areas in the
77 // system which believe this to be true (e.g. oop->object_size in some
78 // cases incorrectly returns the size in wordSize units rather than
79 // HeapWordSize).
80 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
81
82 // Allocate space for the heap.
83
84 char* heap_address;
85 ReservedSpace heap_rs;
86
87 size_t heap_alignment = collector_policy()->heap_alignment();
133
134 os::trace_page_sizes("Heap",
135 collector_policy()->min_heap_byte_size(),
136 total_reserved,
137 alignment,
138 heap_rs->base(),
139 heap_rs->size());
140
141 return heap_rs->base();
142 }
143
144 void GenCollectedHeap::post_initialize() {
145 CollectedHeap::post_initialize();
146 ref_processing_init();
147 check_gen_kinds();
148 DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
149
150 _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
151 _old_gen->capacity(),
152 def_new_gen->from()->capacity());
153 _gen_policy->initialize_gc_policy_counters();
154 }
155
156 void GenCollectedHeap::ref_processing_init() {
157 _young_gen->ref_processor_init();
158 _old_gen->ref_processor_init();
159 }
160
161 size_t GenCollectedHeap::capacity() const {
162 return _young_gen->capacity() + _old_gen->capacity();
163 }
164
165 size_t GenCollectedHeap::used() const {
166 return _young_gen->used() + _old_gen->used();
167 }
168
169 void GenCollectedHeap::save_used_regions() {
170 _old_gen->save_used_region();
171 _young_gen->save_used_region();
172 }
314 // Read the gc count while the heap lock is held.
315 gc_count_before = total_collections();
316 }
317
318 VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
319 VMThread::execute(&op);
320 if (op.prologue_succeeded()) {
321 result = op.result();
322 if (op.gc_locked()) {
323 assert(result == NULL, "must be NULL if gc_locked() is true");
324 continue; // Retry and/or stall as necessary.
325 }
326
327 // Allocation has failed and a collection
328 // has been done. If the gc time limit was exceeded the
329 // this time, return NULL so that an out-of-memory
330 // will be thrown. Clear gc_overhead_limit_exceeded
331 // so that the overhead exceeded does not persist.
332
333 const bool limit_exceeded = gen_policy()->size_policy()->gc_overhead_limit_exceeded();
334 const bool softrefs_clear = gen_policy()->all_soft_refs_clear();
335
336 if (limit_exceeded && softrefs_clear) {
337 *gc_overhead_limit_was_exceeded = true;
338 gen_policy()->size_policy()->set_gc_overhead_limit_exceeded(false);
339 if (op.result() != NULL) {
340 CollectedHeap::fill_with_object(op.result(), size);
341 }
342 return NULL;
343 }
344 assert(result == NULL || is_in_reserved(result),
345 "result not in heap");
346 return result;
347 }
348
349 // Give a warning if we seem to be looping forever.
350 if ((QueuedAllocationWarningCount > 0) &&
351 (try_count % QueuedAllocationWarningCount == 0)) {
352 log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
353 " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
354 }
505 bool is_tlab,
506 GenerationType max_generation) {
507 ResourceMark rm;
508 DEBUG_ONLY(Thread* my_thread = Thread::current();)
509
510 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
511 assert(my_thread->is_VM_thread() ||
512 my_thread->is_ConcurrentGC_thread(),
513 "incorrect thread type capability");
514 assert(Heap_lock->is_locked(),
515 "the requesting thread should have the Heap_lock");
516 guarantee(!is_gc_active(), "collection is not reentrant");
517
518 if (GCLocker::check_active_before_gc()) {
519 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
520 }
521
522 GCIdMark gc_id_mark;
523
524 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
525 collector_policy()->should_clear_all_soft_refs();
526
527 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
528
529 const size_t metadata_prev_used = MetaspaceAux::used_bytes();
530
531 print_heap_before_gc();
532
533 {
534 FlagSetting fl(_is_gc_active, true);
535
536 bool complete = full && (max_generation == OldGen);
537 bool old_collects_young = complete && !ScavengeBeforeFullGC;
538 bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);
539
540 FormatBuffer<> gc_string("%s", "Pause ");
541 if (do_young_collection) {
542 gc_string.append("Young");
543 } else {
544 gc_string.append("Full");
545 }
546
547 GCTraceCPUTime tcpu;
703 // we can to reclaim memory. Force collection of soft references. Force
704 // a complete compaction of the heap. Any additional methods for finding
705 // free memory should be here, especially if they are expensive. If this
706 // attempt fails, an OOM exception will be thrown.
707 {
708 UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
709
710 do_collection(true, // full
711 true, // clear_all_soft_refs
712 size, // size
713 is_tlab, // is_tlab
714 GenCollectedHeap::OldGen); // max_generation
715 }
716
717 result = attempt_allocation(size, is_tlab, false /* first_only */);
718 if (result != NULL) {
719 assert(is_in_reserved(result), "result not in heap");
720 return result;
721 }
722
723 assert(!gen_policy()->should_clear_all_soft_refs(),
724 "Flag should have been handled and cleared prior to this point");
725
726 // What else? We might try synchronous finalization later. If the total
727 // space available is large enough for the allocation, then a more
728 // complete compaction phase than we've tried so far might be
729 // appropriate.
730 return NULL;
731 }
732
733 #ifdef ASSERT
734 class AssertNonScavengableClosure: public OopClosure {
735 public:
736 virtual void do_oop(oop* p) {
737 assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
738 "Referent should not be scavengable."); }
739 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
740 };
741 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
742 #endif
743
|
48 #include "memory/filemap.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "oops/oop.inline.hpp"
51 #include "runtime/biasedLocking.hpp"
52 #include "runtime/handles.hpp"
53 #include "runtime/handles.inline.hpp"
54 #include "runtime/java.hpp"
55 #include "runtime/vmThread.hpp"
56 #include "services/management.hpp"
57 #include "services/memoryService.hpp"
58 #include "utilities/debug.hpp"
59 #include "utilities/formatBuffer.hpp"
60 #include "utilities/macros.hpp"
61 #include "utilities/stack.inline.hpp"
62 #include "utilities/vmError.hpp"
63
64 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
65 CollectedHeap(),
66 _rem_set(NULL),
67 _gen_policy(policy),
68 _soft_ref_gen_policy(),
69 _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
70 _full_collections_completed(0)
71 {
72 assert(policy != NULL, "Sanity check");
73 }
74
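// Heap initialization: verify that HeapWordSize matches the machine word
// size, then reserve the address space that will back the generations.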
jint GenCollectedHeap::initialize() {
  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  char* heap_address;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

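  // ...
  // (Below: page-size tracing for the reservation and the return of its base
  //  address. heap_rs is accessed through a pointer here, so these lines most
  //  likely belong to the separate reservation helper, not to initialize().)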
  os::trace_page_sizes("Heap",
                       collector_policy()->min_heap_byte_size(),
                       total_reserved,
                       alignment,
                       heap_rs->base(),
                       heap_rs->size());

  return heap_rs->base();
}
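// Post-initialization: set up reference processing for both generations and
// seed the size policy with the initial eden, old, and survivor capacities.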
void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
  check_gen_kinds();
  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                      _old_gen->capacity(),
                                      def_new_gen->from()->capacity());

  _gen_policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}
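// Whole-heap capacity and usage are simply the sums over the two generations.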
size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}
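// ...
// (From GenCollectedHeap::mem_allocate_work: the fast-path attempts have
//  failed, so a collection is requested via a VM operation. The GC count is
//  sampled under the Heap_lock so the VM thread can detect a racing GC and
//  skip a redundant collection.)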
      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue; // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection has been done. If the GC time
      // limit was exceeded this time, return NULL so that an out-of-memory
      // error will be thrown. Clear gc_overhead_limit_exceeded so that the
      // overhead-exceeded state does not persist.

      const bool limit_exceeded = gen_policy()->size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        gen_policy()->size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
                            " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
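// ...
// (Entry of the collection routine itself, presumably
//  GenCollectedHeap::do_collection: it must run at a safepoint, on the VM or
//  a concurrent GC thread, with the Heap_lock held, and is not reentrant.)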
                                     bool is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  GCIdMark gc_id_mark;

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                                      soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_generation == OldGen);
    bool old_collects_young = complete && !ScavengeBeforeFullGC;
    bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

    FormatBuffer<> gc_string("%s", "Pause ");
    if (do_young_collection) {
      gc_string.append("Young");
    } else {
      gc_string.append("Full");
    }

    GCTraceCPUTime tcpu;
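// ...
// (Last-resort path of the failed-allocation handler: force a maximally
//  compacting full collection that also clears soft references, then retry
//  the allocation one final time before reporting out-of-memory.)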
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
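// Debug-only verification closure: every oop it visits must not point into
// a space that a partial (young) collection would scavenge.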
#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable."); }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif