68 // Leave this one last.
69 GCH_PS_NumElements
70 };
71
// Construct the generational heap from the given collector policy.
// A SubTasksDone with GCH_PS_NumElements slots is allocated eagerly
// (name suggests it claims root-processing subtasks — confirm at use sites).
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  // The subtask state is essential; abort VM startup rather than
  // continue without it.
  if (_gen_process_roots_tasks == NULL ||
      !_gen_process_roots_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  // Debug-only sanity check; the member initializers above already
  // dereferenced-by-copy the pointer, so this fires only in debug builds.
  assert(policy != NULL, "Sanity check");
}
84
85 jint GenCollectedHeap::initialize() {
86 CollectedHeap::pre_initialize();
87
88 int i;
89 _n_gens = gen_policy()->number_of_generations();
90
91 // While there are no constraints in the GC code that HeapWordSize
92 // be any particular value, there are multiple other areas in the
93 // system which believe this to be true (e.g. oop->object_size in some
94 // cases incorrectly returns the size in wordSize units rather than
95 // HeapWordSize).
96 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
97
98 // Allocate space for the heap.
99
100 char* heap_address;
101 size_t total_reserved = 0;
102 int n_covered_regions = 0;
103 ReservedSpace heap_rs;
104
105 size_t heap_alignment = collector_policy()->heap_alignment();
106
107 heap_address = allocate(heap_alignment, &total_reserved,
108 &n_covered_regions, &heap_rs);
109
110 if (!heap_rs.is_reserved()) {
146 size_t* _total_reserved,
147 int* _n_covered_regions,
148 ReservedSpace* heap_rs){
149 const char overflow_msg[] = "The size of the object heap + VM data exceeds "
150 "the maximum representable size";
151
152 // Now figure out the total size.
153 const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
154 assert(alignment % pageSize == 0, "Must be");
155
156 size_t total_reserved = gen_policy()->young_gen_spec()->max_size() +
157 gen_policy()->old_gen_spec()->max_size();
158 if (total_reserved < gen_policy()->young_gen_spec()->max_size() ||
159 total_reserved < gen_policy()->old_gen_spec()->max_size()) {
160 vm_exit_during_initialization(overflow_msg);
161 }
162 assert(total_reserved % alignment == 0,
163 err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
164 SIZE_FORMAT, total_reserved, alignment));
165
166 int n_covered_regions = gen_policy()->young_gen_spec()->n_covered_regions() +
167 gen_policy()->old_gen_spec()->n_covered_regions();
168
169 // Needed until the cardtable is fixed to have the right number
170 // of covered regions.
171 n_covered_regions += 2;
172
173 *_total_reserved = total_reserved;
174 *_n_covered_regions = n_covered_regions;
175
176 *heap_rs = Universe::reserve_heap(total_reserved, alignment);
177 return heap_rs->base();
178 }
179
180 void GenCollectedHeap::post_initialize() {
181 SharedHeap::post_initialize();
182 GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
183 guarantee(policy->is_generation_policy(), "Illegal policy type");
184 DefNewGeneration* def_new_gen = (DefNewGeneration*) _young_gen;
185 assert(def_new_gen->kind() == Generation::DefNew ||
186 def_new_gen->kind() == Generation::ParNew,
187 "Wrong generation kind");
195 def_new_gen->from()->capacity());
196 policy->initialize_gc_policy_counters();
197 }
198
// Initialize reference processing: shared (superclass) state first,
// then each generation's own reference processor (young before old).
void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}
204
205 size_t GenCollectedHeap::capacity() const {
206 return _young_gen->capacity() + _old_gen->capacity();
207 }
208
209 size_t GenCollectedHeap::used() const {
210 return _young_gen->used() + _old_gen->used();
211 }
212
213 // Save the "used_region" for generations level and lower.
214 void GenCollectedHeap::save_used_regions(int level) {
215 assert(level < _n_gens, "Illegal level parameter");
216 if (level == 1) {
217 _old_gen->save_used_region();
218 }
219 _young_gen->save_used_region();
220 }
221
222 size_t GenCollectedHeap::max_capacity() const {
223 return _young_gen->max_capacity() + _old_gen->max_capacity();
224 }
225
// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  // No safepoint check: this runs at the end of a stop-world GC.
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  // Wake any threads blocked on FullGCCount_lock waiting for a full
  // collection to complete (waiters are not visible in this file).
  ml.notify_all();
  return _full_collections_completed;
}
745 }
746 return true; // success
747 }
748
// Schedule a mostly-concurrent full collection via a VM operation.
// Compiled only when INCLUDE_ALL_GCS is set (see the #endif below).
void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before = total_collections();
  {
    // Release the Heap_lock across the (blocking) VM operation; the counts
    // captured above presumably let the operation detect an intervening GC
    // — confirm in VM_GenCollectFullConcurrent.
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
762 #endif // INCLUDE_ALL_GCS
763
// Full collection of the entire heap: delegate with the highest
// generation level (_n_gens - 1) as max_level.
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, _n_gens - 1);
}
767
768 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
769 int max_level) {
770 int local_max_level;
771 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
772 gc_cause() == GCCause::_gc_locker) {
773 local_max_level = 0;
774 } else {
775 local_max_level = max_level;
776 }
777
778 do_collection(true /* full */,
779 clear_all_soft_refs /* clear_all_soft_refs */,
780 0 /* size */,
781 false /* is_tlab */,
782 local_max_level /* max_level */);
783 // Hack XXX FIX ME !!!
784 // A scavenge may not have been attempted, or may have
785 // been attempted and failed, because the old gen was too full
// Apply "cl" to every space in the heap, young generation first.
// The "true" argument is forwarded to Generation::space_iterate
// (NOTE(review): presumably a "used regions only"-style flag — confirm
// against Generation's declaration).
void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  _young_gen->space_iterate(cl, true);
  _old_gen->space_iterate(cl, true);
}
1013
1014 bool GenCollectedHeap::is_maximal_no_gc() const {
1015 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1016 }
1017
// Ask each generation to save its marks, young first, then old.
void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}
1022
// Accessor for the singleton heap instance, with sanity checks that it
// has been initialized and is in fact a generational heap.
GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}
1028
1029
1030 void GenCollectedHeap::prepare_for_compaction() {
1031 guarantee(_n_gens = 2, "Wrong number of generations");
1032 // Start by compacting into same gen.
1033 CompactPoint cp(_old_gen);
1034 _old_gen->prepare_for_compaction(&cp);
1035 _young_gen->prepare_for_compaction(&cp);
1036 }
1037
1038 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1039 if (!silent) {
1040 gclog_or_tty->print("%s", _old_gen->name());
1041 gclog_or_tty->print(" ");
1042 }
1043 _old_gen->verify();
1044
1045 if (!silent) {
1046 gclog_or_tty->print("%s", _young_gen->name());
1047 gclog_or_tty->print(" ");
1048 }
1049 _young_gen->verify();
1050
1051 if (!silent) {
|
68 // Leave this one last.
69 GCH_PS_NumElements
70 };
71
// Construct the generational heap from the given collector policy.
// A SubTasksDone with GCH_PS_NumElements slots is allocated eagerly
// (name suggests it claims root-processing subtasks — confirm at use sites).
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  // The subtask state is essential; abort VM startup rather than
  // continue without it.
  if (_gen_process_roots_tasks == NULL ||
      !_gen_process_roots_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  // Debug-only sanity check on the incoming policy pointer.
  assert(policy != NULL, "Sanity check");
}
84
85 jint GenCollectedHeap::initialize() {
86 CollectedHeap::pre_initialize();
87
88 // While there are no constraints in the GC code that HeapWordSize
89 // be any particular value, there are multiple other areas in the
90 // system which believe this to be true (e.g. oop->object_size in some
91 // cases incorrectly returns the size in wordSize units rather than
92 // HeapWordSize).
93 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
94
95 // Allocate space for the heap.
96
97 char* heap_address;
98 size_t total_reserved = 0;
99 int n_covered_regions = 0;
100 ReservedSpace heap_rs;
101
102 size_t heap_alignment = collector_policy()->heap_alignment();
103
104 heap_address = allocate(heap_alignment, &total_reserved,
105 &n_covered_regions, &heap_rs);
106
107 if (!heap_rs.is_reserved()) {
143 size_t* _total_reserved,
144 int* _n_covered_regions,
145 ReservedSpace* heap_rs){
146 const char overflow_msg[] = "The size of the object heap + VM data exceeds "
147 "the maximum representable size";
148
149 // Now figure out the total size.
150 const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
151 assert(alignment % pageSize == 0, "Must be");
152
153 size_t total_reserved = gen_policy()->young_gen_spec()->max_size() +
154 gen_policy()->old_gen_spec()->max_size();
155 if (total_reserved < gen_policy()->young_gen_spec()->max_size() ||
156 total_reserved < gen_policy()->old_gen_spec()->max_size()) {
157 vm_exit_during_initialization(overflow_msg);
158 }
159 assert(total_reserved % alignment == 0,
160 err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
161 SIZE_FORMAT, total_reserved, alignment));
162
163 int n_covered_regions = 2; // Young + Old
164
165 // Needed until the cardtable is fixed to have the right number
166 // of covered regions.
167 n_covered_regions += 2;
168
169 *_total_reserved = total_reserved;
170 *_n_covered_regions = n_covered_regions;
171
172 *heap_rs = Universe::reserve_heap(total_reserved, alignment);
173 return heap_rs->base();
174 }
175
176 void GenCollectedHeap::post_initialize() {
177 SharedHeap::post_initialize();
178 GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
179 guarantee(policy->is_generation_policy(), "Illegal policy type");
180 DefNewGeneration* def_new_gen = (DefNewGeneration*) _young_gen;
181 assert(def_new_gen->kind() == Generation::DefNew ||
182 def_new_gen->kind() == Generation::ParNew,
183 "Wrong generation kind");
191 def_new_gen->from()->capacity());
192 policy->initialize_gc_policy_counters();
193 }
194
// Initialize reference processing: shared (superclass) state first,
// then each generation's own reference processor (young before old).
void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}
200
201 size_t GenCollectedHeap::capacity() const {
202 return _young_gen->capacity() + _old_gen->capacity();
203 }
204
205 size_t GenCollectedHeap::used() const {
206 return _young_gen->used() + _old_gen->used();
207 }
208
209 // Save the "used_region" for generations level and lower.
210 void GenCollectedHeap::save_used_regions(int level) {
211 assert(level < _gen_policy->number_of_generations(), "Illegal level parameter");
212 if (level == 1) {
213 _old_gen->save_used_region();
214 }
215 _young_gen->save_used_region();
216 }
217
218 size_t GenCollectedHeap::max_capacity() const {
219 return _young_gen->max_capacity() + _old_gen->max_capacity();
220 }
221
// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  // No safepoint check: this runs at the end of a stop-world GC.
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  // Wake any threads blocked on FullGCCount_lock waiting for a full
  // collection to complete (waiters are not visible in this file).
  ml.notify_all();
  return _full_collections_completed;
}
741 }
742 return true; // success
743 }
744
// Schedule a mostly-concurrent full collection via a VM operation.
// Compiled only when INCLUDE_ALL_GCS is set (see the #endif below).
void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before = total_collections();
  {
    // Release the Heap_lock across the (blocking) VM operation; the counts
    // captured above presumably let the operation detect an intervening GC
    // — confirm in VM_GenCollectFullConcurrent.
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
758 #endif // INCLUDE_ALL_GCS
759
// Full collection of the entire heap: delegate with the highest
// generation level as max_level.
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, _gen_policy->number_of_generations() - 1);
}
763
764 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
765 int max_level) {
766 int local_max_level;
767 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
768 gc_cause() == GCCause::_gc_locker) {
769 local_max_level = 0;
770 } else {
771 local_max_level = max_level;
772 }
773
774 do_collection(true /* full */,
775 clear_all_soft_refs /* clear_all_soft_refs */,
776 0 /* size */,
777 false /* is_tlab */,
778 local_max_level /* max_level */);
779 // Hack XXX FIX ME !!!
780 // A scavenge may not have been attempted, or may have
781 // been attempted and failed, because the old gen was too full
// Apply "cl" to every space in the heap, young generation first.
// The "true" argument is forwarded to Generation::space_iterate
// (NOTE(review): presumably a "used regions only"-style flag — confirm
// against Generation's declaration).
void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  _young_gen->space_iterate(cl, true);
  _old_gen->space_iterate(cl, true);
}
1009
1010 bool GenCollectedHeap::is_maximal_no_gc() const {
1011 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1012 }
1013
// Ask each generation to save its marks, young first, then old.
void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}
1018
// Accessor for the singleton heap instance, with sanity checks that it
// has been initialized and is in fact a generational heap.
GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}
1024
// Prepare both generations for mark-compact: a single CompactPoint
// anchored at the old generation is threaded through old first, then
// young, so everything compacts toward the old generation's space.
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
1031
1032 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1033 if (!silent) {
1034 gclog_or_tty->print("%s", _old_gen->name());
1035 gclog_or_tty->print(" ");
1036 }
1037 _old_gen->verify();
1038
1039 if (!silent) {
1040 gclog_or_tty->print("%s", _young_gen->name());
1041 gclog_or_tty->print(" ");
1042 }
1043 _young_gen->verify();
1044
1045 if (!silent) {
|