68 GCH_PS_ObjectSynchronizer_oops_do,
69 GCH_PS_FlatProfiler_oops_do,
70 GCH_PS_Management_oops_do,
71 GCH_PS_SystemDictionary_oops_do,
72 GCH_PS_ClassLoaderDataGraph_oops_do,
73 GCH_PS_jvmti_oops_do,
74 GCH_PS_CodeCache_oops_do,
75 GCH_PS_younger_gens,
76 // Leave this one last.
77 GCH_PS_NumElements
78 };
79
// Construct the two-generation heap. Only records the collector policy and
// sets up bookkeeping; generations and reserved space are created later in
// initialize(). 'policy' must be non-NULL (asserted below).
80 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
81 SharedHeap(),
82 _rem_set(NULL),
83 _gen_policy(policy),
84 _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),  // one subtask slot per GCH_PS_* strong-root group
85 _full_collections_completed(0)
86 {
87 assert(policy != NULL, "Sanity check");
88 }
89
90 jint GenCollectedHeap::initialize() {
91 CollectedHeap::pre_initialize();
92
93 _n_gens = gen_policy()->number_of_generations();
94 assert(_n_gens == 2, "There is no support for more than two generations");
95
96 // While there are no constraints in the GC code that HeapWordSize
97 // be any particular value, there are multiple other areas in the
98 // system which believe this to be true (e.g. oop->object_size in some
99 // cases incorrectly returns the size in wordSize units rather than
100 // HeapWordSize).
101 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
102
103 // Allocate space for the heap.
104
105 char* heap_address;
106 ReservedSpace heap_rs;
107
149 assert(alignment % pageSize == 0, "Must be");
150
151 GenerationSpec* young_spec = gen_policy()->young_gen_spec();
152 GenerationSpec* old_spec = gen_policy()->old_gen_spec();
153
154 // Check for overflow.
155 size_t total_reserved = young_spec->max_size() + old_spec->max_size();
156 if (total_reserved < young_spec->max_size()) {
157 vm_exit_during_initialization("The size of the object heap + VM data exceeds "
158 "the maximum representable size");
159 }
160 assert(total_reserved % alignment == 0,
161 err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
162 SIZE_FORMAT, total_reserved, alignment));
163
164 *heap_rs = Universe::reserve_heap(total_reserved, alignment);
165 return heap_rs->base();
166 }
167
// Runs after the generations exist: verifies the policy/generation kinds,
// then seeds the adaptive size policy with the initial eden/old/survivor
// capacities and creates the GC policy performance counters.
168 void GenCollectedHeap::post_initialize() {
169 SharedHeap::post_initialize();
170 GenCollectedHeap* policy = (GenCollectorPolicy *)collector_policy();  // NOTE(review): see next line — cast assumes a generational policy
171 guarantee(policy->is_generation_policy(), "Illegal policy type");
// The young generation must be DefNew or its parallel subclass ParNew,
// which makes the DefNewGeneration cast below safe.
172 assert((_young_gen->kind() == Generation::DefNew) ||
173 (_young_gen->kind() == Generation::ParNew),
174 "Wrong youngest generation type");
175 DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
176
177 assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
178 _old_gen->kind() == Generation::MarkSweepCompact,
179 "Wrong generation kind");
180
181 policy->initialize_size_policy(def_new_gen->eden()->capacity(),
182 _old_gen->capacity(),
183 def_new_gen->from()->capacity());
184 policy->initialize_gc_policy_counters();
185 }
186
// Initialize reference processing for the shared heap and then for each
// generation's own ReferenceProcessor.
187 void GenCollectedHeap::ref_processing_init() {
188 SharedHeap::ref_processing_init();
189 _young_gen->ref_processor_init();
190 _old_gen->ref_processor_init();
191 }
192
// Total committed capacity: sum over both generations.
193 size_t GenCollectedHeap::capacity() const {
194 return _young_gen->capacity() + _old_gen->capacity();
195 }
196
// Total used bytes: sum over both generations.
197 size_t GenCollectedHeap::used() const {
198 return _young_gen->used() + _old_gen->used();
199 }
200
201 // Save the "used_region" for generations level and lower.
202 void GenCollectedHeap::save_used_regions(int level) {
203 assert(level >= 0, "Illegal level parameter");
204 assert(level < _n_gens, "Illegal level parameter");
205 if (level == 1) {
206 _old_gen->save_used_region();
207 }
208 _young_gen->save_used_region();
543
544 gc_epilogue(complete);
545
546 if (must_restore_marks_for_biased_locking) {
547 BiasedLocking::restore_marks();
548 }
549 }
550
551 print_heap_after_gc();
552
553 #ifdef TRACESPINNING
554 ParallelTaskTerminator::print_termination_counts();
555 #endif
556 }
557
// A normal allocation failed; delegate the recovery attempt (GC and/or
// expansion) to the collector policy.
558 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
559 return collector_policy()->satisfy_failed_allocation(size, is_tlab);
560 }
561
// Record the number of parallel GC worker threads, both in the shared heap
// and in the strong-roots termination bookkeeping.
562 void GenCollectedHeap::set_par_threads(uint t) {
563 SharedHeap::set_par_threads(t);
564 set_n_termination(t);
565 }
566
// Set how many threads will claim the GCH_PS_* strong-root subtasks.
567 void GenCollectedHeap::set_n_termination(uint t) {
568 _process_strong_tasks->set_n_threads(t);
569 }
570
571 #ifdef ASSERT
// Debug-only closure: asserts that every oop it visits is NOT in the part
// of the heap subject to a partial (young) collection.
572 class AssertNonScavengableClosure: public OopClosure {
573 public:
574 virtual void do_oop(oop* p) {
575 assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
576 "Referent should not be scavengable."); }
// Compressed oops are not expected here.
577 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
578 };
579 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
580 #endif
581
582 void GenCollectedHeap::process_roots(bool activate_scope,
583 ScanningOption so,
907 VerifyDuringStartup ||
908 PrintAssembly ||
909 tty->count() != 0 || // already printing
910 VerifyAfterGC ||
911 VMError::fatal_error_in_progress(), "too expensive");
912
913 #endif
914 return _young_gen->is_in(p) || _old_gen->is_in(p);
915 }
916
#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  // A non-NULL address below the end of the young gen's reserved region
  // lies in the part of the heap covered by a partial collection.
  return p != NULL && p < _young_gen->reserved().end();
}
#endif
926
// Apply the oop closure to every oop in the heap, young generation first.
927 void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
928 _young_gen->oop_iterate(cl);
929 _old_gen->oop_iterate(cl);
930 }
931
// Apply the object closure to every object in the heap, young first.
932 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
933 _young_gen->object_iterate(cl);
934 _old_gen->object_iterate(cl);
935 }
936
// As object_iterate(), but using each generation's "safe" variant.
937 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
938 _young_gen->safe_object_iterate(cl);
939 _old_gen->safe_object_iterate(cl);
940 }
941
942 Space* GenCollectedHeap::space_containing(const void* addr) const {
943 Space* res = _young_gen->space_containing(addr);
944 if (res != NULL) {
945 return res;
946 }
1074 void do_generation(Generation* gen) {
1075 gen->prepare_for_verify();
1076 }
1077 };
1078
// Make the heap parsable and let each generation prepare itself for
// verification (young generation first, per generation_iterate(false)).
1079 void GenCollectedHeap::prepare_for_verify() {
1080 ensure_parsability(false); // no need to retire TLABs
1081 GenPrepareForVerifyClosure blk;
1082 generation_iterate(&blk, false);
1083 }
1084
1085 void GenCollectedHeap::generation_iterate(GenClosure* cl,
1086 bool old_to_young) {
1087 if (old_to_young) {
1088 cl->do_generation(_old_gen);
1089 cl->do_generation(_young_gen);
1090 } else {
1091 cl->do_generation(_young_gen);
1092 cl->do_generation(_old_gen);
1093 }
1094 }
1095
// Apply the space closure to every space in both generations.
1096 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
1097 _young_gen->space_iterate(cl, true);
1098 _old_gen->space_iterate(cl, true);
1099 }
1100
// The heap is at maximum size (cannot grow without a GC) only if both
// generations are.
1101 bool GenCollectedHeap::is_maximal_no_gc() const {
1102 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1103 }
1104
// Record the current allocation high-water marks in both generations.
1105 void GenCollectedHeap::save_marks() {
1106 _young_gen->save_marks();
1107 _old_gen->save_marks();
1108 }
1109
// Accessor for the singleton heap; asserts it is initialized and of the
// expected generational kind.
1110 GenCollectedHeap* GenCollectedHeap::heap() {
1111 assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1112 assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
1113 return _gch;
1114 }
1115
1116
1117 void GenCollectedHeap::prepare_for_compaction() {
1118 guarantee(_n_gens = 2, "Wrong number of generations");
|
68 GCH_PS_ObjectSynchronizer_oops_do,
69 GCH_PS_FlatProfiler_oops_do,
70 GCH_PS_Management_oops_do,
71 GCH_PS_SystemDictionary_oops_do,
72 GCH_PS_ClassLoaderDataGraph_oops_do,
73 GCH_PS_jvmti_oops_do,
74 GCH_PS_CodeCache_oops_do,
75 GCH_PS_younger_gens,
76 // Leave this one last.
77 GCH_PS_NumElements
78 };
79
// Construct the two-generation heap. Records the collector policy, sets up
// strong-root subtask bookkeeping, and (for CMS only) creates and starts
// the parallel GC worker gang; serial GC leaves _workers NULL.
80 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
81 SharedHeap(),
82 _rem_set(NULL),
83 _gen_policy(policy),
84 _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),  // one subtask slot per GCH_PS_* strong-root group
85 _full_collections_completed(0)
86 {
87 assert(policy != NULL, "Sanity check");
88 if (UseConcMarkSweepGC) {
89 _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
90 /* are_GC_task_threads */true,
91 /* are_ConcurrentGC_threads */false);
92 _workers->initialize_workers();
93 } else {
94 // Serial GC does not use workers.
95 _workers = NULL;
96 }
97 }
98
99 jint GenCollectedHeap::initialize() {
100 CollectedHeap::pre_initialize();
101
102 _n_gens = gen_policy()->number_of_generations();
103 assert(_n_gens == 2, "There is no support for more than two generations");
104
105 // While there are no constraints in the GC code that HeapWordSize
106 // be any particular value, there are multiple other areas in the
107 // system which believe this to be true (e.g. oop->object_size in some
108 // cases incorrectly returns the size in wordSize units rather than
109 // HeapWordSize).
110 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
111
112 // Allocate space for the heap.
113
114 char* heap_address;
115 ReservedSpace heap_rs;
116
158 assert(alignment % pageSize == 0, "Must be");
159
160 GenerationSpec* young_spec = gen_policy()->young_gen_spec();
161 GenerationSpec* old_spec = gen_policy()->old_gen_spec();
162
163 // Check for overflow.
164 size_t total_reserved = young_spec->max_size() + old_spec->max_size();
165 if (total_reserved < young_spec->max_size()) {
166 vm_exit_during_initialization("The size of the object heap + VM data exceeds "
167 "the maximum representable size");
168 }
169 assert(total_reserved % alignment == 0,
170 err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
171 SIZE_FORMAT, total_reserved, alignment));
172
173 *heap_rs = Universe::reserve_heap(total_reserved, alignment);
174 return heap_rs->base();
175 }
176
// Runs after the generations exist: initializes reference processing,
// verifies the policy/generation kinds, then seeds the adaptive size policy
// with the initial eden/old/survivor capacities and creates GC counters.
177 void GenCollectedHeap::post_initialize() {
178 CollectedHeap::post_initialize();
179 ref_processing_init();
180 GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
181 guarantee(policy->is_generation_policy(), "Illegal policy type");
// The young generation must be DefNew or its parallel subclass ParNew,
// which makes the DefNewGeneration cast below safe.
182 assert((_young_gen->kind() == Generation::DefNew) ||
183 (_young_gen->kind() == Generation::ParNew),
184 "Wrong youngest generation type");
185 DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
186
187 assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
188 _old_gen->kind() == Generation::MarkSweepCompact,
189 "Wrong generation kind");
190
191 policy->initialize_size_policy(def_new_gen->eden()->capacity(),
192 _old_gen->capacity(),
193 def_new_gen->from()->capacity());
194 policy->initialize_gc_policy_counters();
195 }
196
// Initialize each generation's own ReferenceProcessor.
197 void GenCollectedHeap::ref_processing_init() {
198 _young_gen->ref_processor_init();
199 _old_gen->ref_processor_init();
200 }
201
// Total committed capacity: sum over both generations.
202 size_t GenCollectedHeap::capacity() const {
203 return _young_gen->capacity() + _old_gen->capacity();
204 }
205
// Total used bytes: sum over both generations.
206 size_t GenCollectedHeap::used() const {
207 return _young_gen->used() + _old_gen->used();
208 }
209
210 // Save the "used_region" for generations level and lower.
211 void GenCollectedHeap::save_used_regions(int level) {
212 assert(level >= 0, "Illegal level parameter");
213 assert(level < _n_gens, "Illegal level parameter");
214 if (level == 1) {
215 _old_gen->save_used_region();
216 }
217 _young_gen->save_used_region();
552
553 gc_epilogue(complete);
554
555 if (must_restore_marks_for_biased_locking) {
556 BiasedLocking::restore_marks();
557 }
558 }
559
560 print_heap_after_gc();
561
562 #ifdef TRACESPINNING
563 ParallelTaskTerminator::print_termination_counts();
564 #endif
565 }
566
// A normal allocation failed; delegate the recovery attempt (GC and/or
// expansion) to the collector policy.
567 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
568 return collector_policy()->satisfy_failed_allocation(size, is_tlab);
569 }
570
// Record the number of parallel GC worker threads; serial GC must always
// pass zero (asserted).
571 void GenCollectedHeap::set_par_threads(uint t) {
572 assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
573 CollectedHeap::set_par_threads(t);
574 set_n_termination(t);
575 }
576
// Set how many threads will claim the GCH_PS_* strong-root subtasks.
577 void GenCollectedHeap::set_n_termination(uint t) {
578 _process_strong_tasks->set_n_threads(t);
579 }
580
581 #ifdef ASSERT
// Debug-only closure: asserts that every oop it visits is NOT in the part
// of the heap subject to a partial (young) collection.
582 class AssertNonScavengableClosure: public OopClosure {
583 public:
584 virtual void do_oop(oop* p) {
585 assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
586 "Referent should not be scavengable."); }
// Compressed oops are not expected here.
587 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
588 };
589 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
590 #endif
591
592 void GenCollectedHeap::process_roots(bool activate_scope,
593 ScanningOption so,
917 VerifyDuringStartup ||
918 PrintAssembly ||
919 tty->count() != 0 || // already printing
920 VerifyAfterGC ||
921 VMError::fatal_error_in_progress(), "too expensive");
922
923 #endif
924 return _young_gen->is_in(p) || _old_gen->is_in(p);
925 }
926
#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  // A non-NULL address below the end of the young gen's reserved region
  // lies in the part of the heap covered by a partial collection.
  return p != NULL && p < _young_gen->reserved().end();
}
#endif
936
// Wrap a plain OopClosure so headers are skipped, then iterate all oops.
937 void GenCollectedHeap::oop_iterate_no_header(OopClosure* cl) {
938 NoHeaderExtendedOopClosure no_header_cl(cl);
939 oop_iterate(&no_header_cl);
940 }
941
// Apply the oop closure to every oop in the heap, young generation first.
942 void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
943 _young_gen->oop_iterate(cl);
944 _old_gen->oop_iterate(cl);
945 }
946
// Apply the object closure to every object in the heap, young first.
947 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
948 _young_gen->object_iterate(cl);
949 _old_gen->object_iterate(cl);
950 }
951
// As object_iterate(), but using each generation's "safe" variant.
952 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
953 _young_gen->safe_object_iterate(cl);
954 _old_gen->safe_object_iterate(cl);
955 }
956
957 Space* GenCollectedHeap::space_containing(const void* addr) const {
958 Space* res = _young_gen->space_containing(addr);
959 if (res != NULL) {
960 return res;
961 }
1089 void do_generation(Generation* gen) {
1090 gen->prepare_for_verify();
1091 }
1092 };
1093
// Make the heap parsable and let each generation prepare itself for
// verification (young generation first, per generation_iterate(false)).
1094 void GenCollectedHeap::prepare_for_verify() {
1095 ensure_parsability(false); // no need to retire TLABs
1096 GenPrepareForVerifyClosure blk;
1097 generation_iterate(&blk, false);
1098 }
1099
1100 void GenCollectedHeap::generation_iterate(GenClosure* cl,
1101 bool old_to_young) {
1102 if (old_to_young) {
1103 cl->do_generation(_old_gen);
1104 cl->do_generation(_young_gen);
1105 } else {
1106 cl->do_generation(_young_gen);
1107 cl->do_generation(_old_gen);
1108 }
1109 }
1110
// The heap is at maximum size (cannot grow without a GC) only if both
// generations are.
1111 bool GenCollectedHeap::is_maximal_no_gc() const {
1112 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1113 }
1114
// Record the current allocation high-water marks in both generations.
1115 void GenCollectedHeap::save_marks() {
1116 _young_gen->save_marks();
1117 _old_gen->save_marks();
1118 }
1119
// Accessor for the singleton heap; asserts it is initialized and of the
// expected generational kind.
1120 GenCollectedHeap* GenCollectedHeap::heap() {
1121 assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1122 assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
1123 return _gch;
1124 }
1125
1126
1127 void GenCollectedHeap::prepare_for_compaction() {
1128 guarantee(_n_gens = 2, "Wrong number of generations");
|