  // Allocate space for the heap.

  char* heap_address;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

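  // Publish [base, base + size) as the heap's reserved region, so that
  // is_in_reserved() and the remembered set created below cover the whole heap.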
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

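  // Create the remembered set over the full reserved region and install its
  // barrier set as the heap's store barrier.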
  _rem_set = collector_policy()->create_rem_set(reserved_region());
  set_barrier_set(rem_set()->bs());

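  // Set up the minor/major GC memory managers up front; each generation is
  // wired to its manager when it is initialized below.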
  init_memory_managers();

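  // Carve the young generation out of the bottom of the reserved space, then
  // shrink heap_rs to the remainder, from which the old generation is carved.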
  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
  _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set(), _minor_mgr);
  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
  _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set(), _major_mgr);
  clear_incremental_collection_failed();

  return JNI_OK;
}

char* GenCollectedHeap::allocate(size_t alignment,
                                 ReservedSpace* heap_rs) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  GenerationSpec* young_spec = gen_policy()->young_gen_spec();
  GenerationSpec* old_spec = gen_policy()->old_gen_spec();

  // Check for overflow.
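  // (size_t addition wraps on overflow, so a wrapped sum is smaller than either addend.)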
  size_t total_reserved = young_spec->max_size() + old_spec->max_size();
  if (total_reserved < young_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  // ... (remainder of allocate() elided in this excerpt) ...
}

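// Entry point for ordinary (non-TLAB) object allocation: delegate to the
// policy's mem_allocate_work(), which retries and schedules GCs as needed.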
HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return gen_policy()->mem_allocate_work(size,
                                         false /* is_tlab */,
                                         gc_overhead_limit_was_exceeded);
}

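// Soft references must all be cleared when the collection was requested in
// order to reclaim metadata, or by the WhiteBox testing hook forcing a full GC.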
bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

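// Collect a single generation, wrapping the collection proper in the tracing,
// statistics and verification bookkeeping below.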
void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->memory_manager(), gc_cause());

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT,
                heap()->is_young_gen(gen) ? "Young" : "Old",
                gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    HandleMark hm; // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }
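  // With C2, clear the derived-pointer table before the collection; entries
  // recorded during GC are used to fix up derived oops afterwards.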
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  if (restore_marks_for_biased_locking) {
    // We perform this mark word preservation work lazily
    // because it's only at this point that we know whether we
    // absolutely have to do it; we want to avoid doing it for
    // scavenge-only collections where it's unnecessary.
    BiasedLocking::preserve_marks();
  }