68 GCH_PS_ObjectSynchronizer_oops_do,
69 GCH_PS_FlatProfiler_oops_do,
70 GCH_PS_Management_oops_do,
71 GCH_PS_SystemDictionary_oops_do,
72 GCH_PS_ClassLoaderDataGraph_oops_do,
73 GCH_PS_jvmti_oops_do,
74 GCH_PS_CodeCache_oops_do,
75 GCH_PS_younger_gens,
76 // Leave this one last.
77 GCH_PS_NumElements
78 };
79
// Construct a two-generation heap driven by the given (non-NULL) policy.
// Heap memory itself is reserved later, in initialize(); here we only
// record the policy and set up the strong-roots task bookkeeping.
80 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
81 SharedHeap(),
82 _rem_set(NULL),
83 _gen_policy(policy),
// One SubTasksDone slot per GCH_PS_* root-scanning task in the enum above.
84 _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
85 _full_collections_completed(0)
86 {
87 assert(policy != NULL, "Sanity check");
88 }
89
// Reserve and carve up the heap: remembered set, barrier set, then the
// young and old generations from the front and back of the reservation.
// Returns JNI_OK on success, JNI_ENOMEM if the reservation fails.
// NOTE(review): this excerpt elides original lines 142-148 (see the jump
// in the embedded line numbers below); the statements after that jump
// belong to GenCollectedHeap::allocate(), not to initialize().
90 jint GenCollectedHeap::initialize() {
91 CollectedHeap::pre_initialize();
92
93 _n_gens = gen_policy()->number_of_generations();
94 assert(_n_gens == 2, "There is no support for more than two generations");
95
96 // While there are no constraints in the GC code that HeapWordSize
97 // be any particular value, there are multiple other areas in the
98 // system which believe this to be true (e.g. oop->object_size in some
99 // cases incorrectly returns the size in wordSize units rather than
100 // HeapWordSize).
101 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
102
103 // Allocate space for the heap.
104
105 char* heap_address;
106 ReservedSpace heap_rs;
107
108 size_t heap_alignment = collector_policy()->heap_alignment();
109
110 heap_address = allocate(heap_alignment, &heap_rs);
111
// Failed reservation is fatal to VM startup.
112 if (!heap_rs.is_reserved()) {
113 vm_shutdown_during_initialization(
114 "Could not reserve enough space for object heap");
115 return JNI_ENOMEM;
116 }
117
118 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
119
// The remembered set provides the barrier set used by all mutator writes.
120 _rem_set = collector_policy()->create_rem_set(reserved_region());
121 set_barrier_set(rem_set()->bs());
122
123 _gch = this;
124
// Young generation occupies the first part of the reservation ...
125 ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
126 _young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set());
127 heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
128
// ... and the old generation the remainder.
129 ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
130 _old_gen = gen_policy()->old_gen_spec()->init(old_rs, 1, rem_set());
131 clear_incremental_collection_failed();
132
133 #if INCLUDE_ALL_GCS
134 // If we are running CMS, create the collector responsible
135 // for collecting the CMS generations.
136 if (collector_policy()->is_concurrent_mark_sweep_policy()) {
137 bool success = create_cms_collector();
138 if (!success) return JNI_ENOMEM;
139 }
140 #endif // INCLUDE_ALL_GCS
141
// NOTE(review): lines elided here; everything below is the tail of
// GenCollectedHeap::allocate(alignment, heap_rs) — it sizes the total
// reservation from the generation specs and reserves it via Universe.
149 assert(alignment % pageSize == 0, "Must be");
150
151 GenerationSpec* young_spec = gen_policy()->young_gen_spec();
152 GenerationSpec* old_spec = gen_policy()->old_gen_spec();
153
154 // Check for overflow.
155 size_t total_reserved = young_spec->max_size() + old_spec->max_size();
156 if (total_reserved < young_spec->max_size()) {
157 vm_exit_during_initialization("The size of the object heap + VM data exceeds "
158 "the maximum representable size");
159 }
160 assert(total_reserved % alignment == 0,
161 err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
162 SIZE_FORMAT, total_reserved, alignment));
163
164 *heap_rs = Universe::reserve_heap(total_reserved, alignment);
165 return heap_rs->base();
166 }
167
// Called after the generations are initialized: checks that the policy
// and generation kinds are a supported combination, then seeds the size
// policy with the young gen's eden/from capacities and the old gen's
// capacity.
168 void GenCollectedHeap::post_initialize() {
169 SharedHeap::post_initialize();
170 GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
171 guarantee(policy->is_generation_policy(), "Illegal policy type");
172 assert((_young_gen->kind() == Generation::DefNew) ||
173 (_young_gen->kind() == Generation::ParNew),
174 "Wrong youngest generation type");
// Safe downcast: the assert above limits the kind to DefNew/ParNew,
// and ParNewGeneration derives from DefNewGeneration.
175 DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
176
177 assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
178 _old_gen->kind() == Generation::MarkSweepCompact,
179 "Wrong generation kind");
180
181 policy->initialize_size_policy(def_new_gen->eden()->capacity(),
182 _old_gen->capacity(),
183 def_new_gen->from()->capacity());
184 policy->initialize_gc_policy_counters();
185 }
186
// Initialize reference processing for the shared heap and then for each
// generation.
187 void GenCollectedHeap::ref_processing_init() {
188 SharedHeap::ref_processing_init();
189 _young_gen->ref_processor_init();
190 _old_gen->ref_processor_init();
191 }
192
193 size_t GenCollectedHeap::capacity() const {
194 return _young_gen->capacity() + _old_gen->capacity();
195 }
196
// Total used bytes: sum over both generations.
197 size_t GenCollectedHeap::used() const {
198 return _young_gen->used() + _old_gen->used();
199 }
200
201 // Save the "used_region" for generations level and lower.
// level 0 = young only; level 1 = old and young. The young generation's
// region is always saved. NOTE(review): the method's closing brace lies
// beyond this excerpt.
202 void GenCollectedHeap::save_used_regions(int level) {
203 assert(level >= 0, "Illegal level parameter");
204 assert(level < _n_gens, "Illegal level parameter");
205 if (level == 1) {
206 _old_gen->save_used_region();
207 }
208 _young_gen->save_used_region();
543
544 gc_epilogue(complete);
545
546 if (must_restore_marks_for_biased_locking) {
547 BiasedLocking::restore_marks();
548 }
549 }
550
551 print_heap_after_gc();
552
553 #ifdef TRACESPINNING
554 ParallelTaskTerminator::print_termination_counts();
555 #endif
556 }
557
// Delegate allocation-failure recovery (e.g. triggering a collection and
// retrying) to the collector policy.
558 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
559 return collector_policy()->satisfy_failed_allocation(size, is_tlab);
560 }
561
// Set the number of parallel GC worker threads, propagating it to the
// strong-roots termination bookkeeping.
562 void GenCollectedHeap::set_par_threads(uint t) {
563 SharedHeap::set_par_threads(t);
564 set_n_termination(t);
565 }
566
// Number of threads that must check in before the strong-roots tasks are
// considered complete.
567 void GenCollectedHeap::set_n_termination(uint t) {
568 _process_strong_tasks->set_n_threads(t);
569 }
570
571 #ifdef ASSERT
// Debug-only closure asserting that no visited oop lives in a region
// that a partial (young) collection would scavenge. narrowOop is
// unexpected here, hence ShouldNotReachHere.
572 class AssertNonScavengableClosure: public OopClosure {
573 public:
574 virtual void do_oop(oop* p) {
575 assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
576 "Referent should not be scavengable."); }
577 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
578 };
579 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
580 #endif
581
582 void GenCollectedHeap::process_roots(bool activate_scope,
583 ScanningOption so,
1074 void do_generation(Generation* gen) {
1075 gen->prepare_for_verify();
1076 }
1077 };
1078
// Make the heap parsable and let each generation (young first) prepare
// itself for verification.
1079 void GenCollectedHeap::prepare_for_verify() {
1080 ensure_parsability(false); // no need to retire TLABs
1081 GenPrepareForVerifyClosure blk;
1082 generation_iterate(&blk, false);
1083 }
1084
1085 void GenCollectedHeap::generation_iterate(GenClosure* cl,
1086 bool old_to_young) {
1087 if (old_to_young) {
1088 cl->do_generation(_old_gen);
1089 cl->do_generation(_young_gen);
1090 } else {
1091 cl->do_generation(_young_gen);
1092 cl->do_generation(_old_gen);
1093 }
1094 }
1095
// Apply "cl" to every space in both generations (young first); the
// "true" argument requests used-region-only iteration per the Generation
// API — TODO confirm against Generation::space_iterate.
1096 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
1097 _young_gen->space_iterate(cl, true);
1098 _old_gen->space_iterate(cl, true);
1099 }
1100
// True only if neither generation can expand without a GC.
1101 bool GenCollectedHeap::is_maximal_no_gc() const {
1102 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1103 }
1104
// Record the current allocation point ("saved mark") in each generation.
1105 void GenCollectedHeap::save_marks() {
1106 _young_gen->save_marks();
1107 _old_gen->save_marks();
1108 }
1109
// Accessor for the singleton heap instance (_gch is set in initialize()).
// Asserts it exists and really is a generational heap.
1110 GenCollectedHeap* GenCollectedHeap::heap() {
1111 assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1112 assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
1113 return _gch;
1114 }
1115
1116
1117 void GenCollectedHeap::prepare_for_compaction() {
1118 guarantee(_n_gens = 2, "Wrong number of generations");
|
68 GCH_PS_ObjectSynchronizer_oops_do,
69 GCH_PS_FlatProfiler_oops_do,
70 GCH_PS_Management_oops_do,
71 GCH_PS_SystemDictionary_oops_do,
72 GCH_PS_ClassLoaderDataGraph_oops_do,
73 GCH_PS_jvmti_oops_do,
74 GCH_PS_CodeCache_oops_do,
75 GCH_PS_younger_gens,
76 // Leave this one last.
77 GCH_PS_NumElements
78 };
79
// Construct a two-generation heap driven by the given (non-NULL) policy.
// When CMS is selected, also create and initialize the parallel GC
// worker gang (owned by this heap in this version of the code).
80 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
81 SharedHeap(),
82 _rem_set(NULL),
83 _gen_policy(policy),
// One SubTasksDone slot per GCH_PS_* root-scanning task in the enum above.
84 _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
85 _full_collections_completed(0)
86 {
87 assert(policy != NULL, "Sanity check");
88 if (UseConcMarkSweepGC) {
89 _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
90 /* are_GC_task_threads */true,
91 /* are_ConcurrentGC_threads */false);
92 _workers->initialize_workers();
93 } else {
// Serial collectors need no worker gang.
94 _workers = NULL;
95 }
96 }
97
// Reserve and carve up the heap: remembered set, barrier set, then the
// young and old generations from the front and back of the reservation.
// Returns JNI_OK on success, JNI_ENOMEM if the reservation fails.
// NOTE(review): this excerpt elides original lines 151-157 (see the jump
// in the embedded line numbers below); the statements after that jump
// belong to GenCollectedHeap::allocate(), not to initialize().
98 jint GenCollectedHeap::initialize() {
99 CollectedHeap::pre_initialize();
100
101 _n_gens = gen_policy()->number_of_generations();
102 assert(_n_gens == 2, "There is no support for more than two generations");
103
104 // While there are no constraints in the GC code that HeapWordSize
105 // be any particular value, there are multiple other areas in the
106 // system which believe this to be true (e.g. oop->object_size in some
107 // cases incorrectly returns the size in wordSize units rather than
108 // HeapWordSize).
109 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
110
111 // Allocate space for the heap.
112
113 char* heap_address;
114 ReservedSpace heap_rs;
115
116 size_t heap_alignment = collector_policy()->heap_alignment();
117
118 heap_address = allocate(heap_alignment, &heap_rs);
119
// Failed reservation is fatal to VM startup.
120 if (!heap_rs.is_reserved()) {
121 vm_shutdown_during_initialization(
122 "Could not reserve enough space for object heap");
123 return JNI_ENOMEM;
124 }
125
126 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
127
// This version installs the barrier set directly (no SharedHeap helper).
128 _rem_set = collector_policy()->create_rem_set(reserved_region());
129 _barrier_set = rem_set()->bs();
130 oopDesc::set_bs(_barrier_set);
131
132 _gch = this;
133
// Young generation occupies the first part of the reservation ...
134 ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
135 _young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set());
136 heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
137
// ... and the old generation the remainder.
138 ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
139 _old_gen = gen_policy()->old_gen_spec()->init(old_rs, 1, rem_set());
140 clear_incremental_collection_failed();
141
142 #if INCLUDE_ALL_GCS
143 // If we are running CMS, create the collector responsible
144 // for collecting the CMS generations.
145 if (collector_policy()->is_concurrent_mark_sweep_policy()) {
146 bool success = create_cms_collector();
147 if (!success) return JNI_ENOMEM;
148 }
149 #endif // INCLUDE_ALL_GCS
150
// NOTE(review): lines elided here; everything below is the tail of
// GenCollectedHeap::allocate(alignment, heap_rs) — it sizes the total
// reservation from the generation specs and reserves it via Universe.
158 assert(alignment % pageSize == 0, "Must be");
159
160 GenerationSpec* young_spec = gen_policy()->young_gen_spec();
161 GenerationSpec* old_spec = gen_policy()->old_gen_spec();
162
163 // Check for overflow.
164 size_t total_reserved = young_spec->max_size() + old_spec->max_size();
165 if (total_reserved < young_spec->max_size()) {
166 vm_exit_during_initialization("The size of the object heap + VM data exceeds "
167 "the maximum representable size");
168 }
169 assert(total_reserved % alignment == 0,
170 err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
171 SIZE_FORMAT, total_reserved, alignment));
172
173 *heap_rs = Universe::reserve_heap(total_reserved, alignment);
174 return heap_rs->base();
175 }
176
// Called after the generations are initialized: kicks off reference
// processing init, checks that the policy and generation kinds are a
// supported combination, then seeds the size policy from the young
// gen's eden/from capacities and the old gen's capacity.
177 void GenCollectedHeap::post_initialize() {
// This version chains to CollectedHeap and calls ref_processing_init()
// itself (formerly done via SharedHeap).
178 void CollectedHeap::post_initialize();
179 ref_processing_init();
180 GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
181 guarantee(policy->is_generation_policy(), "Illegal policy type");
182 assert((_young_gen->kind() == Generation::DefNew) ||
183 (_young_gen->kind() == Generation::ParNew),
184 "Wrong youngest generation type");
// Safe downcast: the assert above limits the kind to DefNew/ParNew,
// and ParNewGeneration derives from DefNewGeneration.
185 DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
186
187 assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
188 _old_gen->kind() == Generation::MarkSweepCompact,
189 "Wrong generation kind");
190
191 policy->initialize_size_policy(def_new_gen->eden()->capacity(),
192 _old_gen->capacity(),
193 def_new_gen->from()->capacity());
194 policy->initialize_gc_policy_counters();
195 }
196
// Initialize reference processing for each generation (no SharedHeap
// chaining in this version).
197 void GenCollectedHeap::ref_processing_init() {
198 _young_gen->ref_processor_init();
199 _old_gen->ref_processor_init();
200 }
201
// Total capacity: sum over both generations.
202 size_t GenCollectedHeap::capacity() const {
203 return _young_gen->capacity() + _old_gen->capacity();
204 }
205
// Total used bytes: sum over both generations.
206 size_t GenCollectedHeap::used() const {
207 return _young_gen->used() + _old_gen->used();
208 }
209
210 // Save the "used_region" for generations level and lower.
// level 0 = young only; level 1 = old and young. The young generation's
// region is always saved. NOTE(review): the method's closing brace lies
// beyond this excerpt.
211 void GenCollectedHeap::save_used_regions(int level) {
212 assert(level >= 0, "Illegal level parameter");
213 assert(level < _n_gens, "Illegal level parameter");
214 if (level == 1) {
215 _old_gen->save_used_region();
216 }
217 _young_gen->save_used_region();
552
553 gc_epilogue(complete);
554
555 if (must_restore_marks_for_biased_locking) {
556 BiasedLocking::restore_marks();
557 }
558 }
559
560 print_heap_after_gc();
561
562 #ifdef TRACESPINNING
563 ParallelTaskTerminator::print_termination_counts();
564 #endif
565 }
566
// Delegate allocation-failure recovery (e.g. triggering a collection and
// retrying) to the collector policy.
567 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
568 return collector_policy()->satisfy_failed_allocation(size, is_tlab);
569 }
570
// Set the number of parallel GC worker threads; serial GC must use 0.
571 void GenCollectedHeap::set_par_threads(uint t) {
572 assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
573 CollectedHeap::set_par_threads(t);
574 set_n_termination(t);
575 }
576
// Number of threads that must check in before the strong-roots tasks are
// considered complete.
577 void GenCollectedHeap::set_n_termination(uint t) {
578 _process_strong_tasks->set_n_threads(t);
579 }
580
581 #ifdef ASSERT
// Debug-only closure asserting that no visited oop lives in a region
// that a partial (young) collection would scavenge. narrowOop is
// unexpected here, hence ShouldNotReachHere.
582 class AssertNonScavengableClosure: public OopClosure {
583 public:
584 virtual void do_oop(oop* p) {
585 assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
586 "Referent should not be scavengable."); }
587 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
588 };
589 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
590 #endif
591
592 void GenCollectedHeap::process_roots(bool activate_scope,
593 ScanningOption so,
1084 void do_generation(Generation* gen) {
1085 gen->prepare_for_verify();
1086 }
1087 };
1088
// Make the heap parsable and let each generation (young first) prepare
// itself for verification.
1089 void GenCollectedHeap::prepare_for_verify() {
1090 ensure_parsability(false); // no need to retire TLABs
1091 GenPrepareForVerifyClosure blk;
1092 generation_iterate(&blk, false);
1093 }
1094
1095 void GenCollectedHeap::generation_iterate(GenClosure* cl,
1096 bool old_to_young) {
1097 if (old_to_young) {
1098 cl->do_generation(_old_gen);
1099 cl->do_generation(_young_gen);
1100 } else {
1101 cl->do_generation(_young_gen);
1102 cl->do_generation(_old_gen);
1103 }
1104 }
1105
// True only if neither generation can expand without a GC.
1106 bool GenCollectedHeap::is_maximal_no_gc() const {
1107 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1108 }
1109
// Record the current allocation point ("saved mark") in each generation.
1110 void GenCollectedHeap::save_marks() {
1111 _young_gen->save_marks();
1112 _old_gen->save_marks();
1113 }
1114
// Accessor for the singleton heap instance (_gch is set in initialize()).
// Asserts it exists and really is a generational heap.
1115 GenCollectedHeap* GenCollectedHeap::heap() {
1116 assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1117 assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
1118 return _gch;
1119 }
1120
1121
1122 void GenCollectedHeap::prepare_for_compaction() {
1123 guarantee(_n_gens = 2, "Wrong number of generations");
|