 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif
// ...
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  _barrier_set->print_on(st);
}

void CollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
// ...
}

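// The defaults below report no concurrent phases (a NULL-terminated empty
// list) and decline every phase request; collectors that actually run
// concurrent phases presumably override these hooks.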
const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc),
  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
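  // The largest filler object is a single int[]: its header plus max_len
  // jint elements, converted from jint counts to heap words.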
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
// ...
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#if defined(COMPILER2) || INCLUDE_JVMCI
  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non-badHeapWordValue in pre-allocation check");
    }
  }
}
// ...

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than what we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we computed that the obvious way, as
  //   header_size + ((sizeof(jint) * max_jint) / HeapWordSize),
  // we'd overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
                        sizeof(jint) *
                        ((juint) max_jint / (size_t) HeapWordSize);
  return align_size_down(max_int_size, MinObjAlignment);
}
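
// A worked example of the overflow noted above -- a sketch assuming an
// ILP32 platform, where size_t is 32 bits, HeapWordSize == 4 and
// sizeof(jint) == 4:
//   multiply first: 4 * 2147483647 == 8589934588, which wraps a 32-bit
//                   size_t (whose maximum is 4294967295);
//   divide first:   2147483647 / 4 == 536870911, then * 4 == 2147483644
//                   words, which fits.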

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the MemRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs an RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, the protocol for maintaining the intended invariants turns
//     out, serendipitously, to be the same for both G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj) ||
      new_obj->is_typeArray()) {
    // Arrays of non-references don't need a pre-barrier.
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}
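
// A minimal sketch (not the HotSpot implementation) of what a card mark
// over a region amounts to for a card-table barrier set. The 512-byte
// card size, the dirty-card encoding of 0, and the helper name are
// assumptions for illustration only; the block is compiled out.
#if 0
static void write_region_sketch(jbyte* byte_map_base, MemRegion mr) {
  const int card_shift = 9;  // log2 of the assumed 512-byte card size
  // Dirty every card that the region [start(), last()] intersects.
  jbyte* from = &byte_map_base[(uintptr_t)mr.start() >> card_shift];
  jbyte* to   = &byte_map_base[(uintptr_t)mr.last()  >> card_shift];
  for (jbyte* c = from; c <= to; ++c) {
    *c = 0;  // 0 == dirty (assumed encoding)
  }
}
#endif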

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
// ...
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

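// Background: a retired TLAB's unused tail is filled with a dummy (filler)
// object so that heap walkers can step from object to object; until every
// thread's TLAB has been handled this way, the heap is not linearly parsable.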
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#if defined(COMPILER2) || INCLUDE_JVMCI
    // The deferred store barriers must all have been flushed to the
    // card-table (or other remembered set structure) before GC starts
    // processing the card-table (or other remembered set).
    if (deferred) flush_deferred_store_barrier(thread);
#else
    assert(!deferred, "Should be false");
    assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
// ...
    ResourceMark rm;
    VM_GC_HeapInspection inspector(log.trace_stream(), false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif
// ...
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  _barrier_set->print_on(st);
}

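// nmethods whose embedded oops point into the young generation are kept on
// the code cache's scavenge-root list so that minor collections can visit
// them; detect_scavenge_root_oops() scans the nmethod for such oops before
// it is registered.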
void CollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
    CodeCache::add_scavenge_root_nmethod(nm);
  }
}

void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
// ...
}

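// The defaults below report no concurrent phases (a NULL-terminated empty
// list) and decline every phase request; collectors that actually run
// concurrent phases presumably override these hooks.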
const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
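  // The largest filler object is a single int[]: its header plus max_len
  // jint elements, converted from jint counts to heap words.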
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
// ...
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
}

void CollectedHeap::pre_initialize() {
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non-badHeapWordValue in pre-allocation check");
    }
  }
}
// ...

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than what we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we computed that the obvious way, as
  //   header_size + ((sizeof(jint) * max_jint) / HeapWordSize),
  // we'd overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
                        sizeof(jint) *
                        ((juint) max_jint / (size_t) HeapWordSize);
  return align_size_down(max_int_size, MinObjAlignment);
}
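
// A worked example of the overflow noted above -- a sketch assuming an
// ILP32 platform, where size_t is 32 bits, HeapWordSize == 4 and
// sizeof(jint) == 4:
//   multiply first: 4 * 2147483647 == 8589934588, which wraps a 32-bit
//                   size_t (whose maximum is 4294967295);
//   divide first:   2147483647 / 4 == 536870911, then * 4 == 2147483644
//                   words, which fits.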

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
// ...
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

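// Background: a retired TLAB's unused tail is filled with a dummy (filler)
// object so that heap walkers can step from object to object; until every
// thread's TLAB has been handled this way, the heap is not linearly
// parsable. The per-thread barrier set hook in the loop below presumably
// gives the collector a chance to flush thread-local barrier state as well.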
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  BarrierSet *bs = barrier_set();
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
    bs->make_parsable(thread);
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
// ...
    ResourceMark rm;
    VM_GC_HeapInspection inspector(log.trace_stream(), false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

void CollectedHeap::verify_nmethod_roots(nmethod* nmethod) {
  nmethod->verify_scavenge_root_oops();
}