21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "gc/shared/allocTracer.hpp"
28 #include "gc/shared/barrierSet.inline.hpp"
29 #include "gc/shared/collectedHeap.hpp"
30 #include "gc/shared/collectedHeap.inline.hpp"
31 #include "gc/shared/gcHeapSummary.hpp"
32 #include "gc/shared/gcTrace.hpp"
33 #include "gc/shared/gcTraceTime.inline.hpp"
34 #include "gc/shared/gcWhen.hpp"
35 #include "gc/shared/vmGCOperations.hpp"
36 #include "logging/log.hpp"
37 #include "memory/metaspace.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "oops/instanceMirrorKlass.hpp"
40 #include "oops/oop.inline.hpp"
41 #include "runtime/init.hpp"
42 #include "runtime/thread.inline.hpp"
43 #include "services/heapDumper.hpp"
44 #include "utilities/align.hpp"
45
46
#ifdef ASSERT
// Debug-only counter; NOTE(review): presumably tracks simulated OOM firings
// for testing -- confirm against CollectedHeap::fire_out_of_memory().
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

// Maximum size (in HeapWords) of a filler array; 0 until initialized
// elsewhere during heap setup.
size_t CollectedHeap::_filler_array_max_size = 0;
52
// Print one GC heap log entry: a tag line saying whether the snapshot was
// taken before or after the GC, followed by the raw captured message text.
template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}
58
59 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
60 if (!should_log()) {
271
#ifdef ASSERT
// Debug-only sanity check run before a heap allocation: the current thread
// must not carry a pending exception, and (under StrictSafepointChecks) it
// must be in a state where allocation -- and therefore a safepoint -- is
// permitted.
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif
290
291 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
292
293 // Retain tlab and allocate object in shared space if
294 // the amount free in the tlab is too large to discard.
295 if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
296 thread->tlab().record_slow_allocation(size);
297 return NULL;
298 }
299
300 // Discard tlab and allocate a new one.
301 // To minimize fragmentation, the last TLAB may be smaller than the rest.
302 size_t new_tlab_size = thread->tlab().compute_size(size);
303
304 thread->tlab().clear_before_allocation();
305
306 if (new_tlab_size == 0) {
307 return NULL;
308 }
309
310 // Allocate a new TLAB...
311 HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
312 if (obj == NULL) {
313 return NULL;
314 }
315
316 AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
317
318 if (ZeroTLAB) {
319 // ..and clear it.
320 Copy::zero_to_words(obj, new_tlab_size);
321 } else {
322 // ...and zap just allocated object.
323 #ifdef ASSERT
324 // Skip mangling the space corresponding to the object header to
325 // ensure that the returned space is not considered parsable by
326 // any concurrent GC thread.
327 size_t hdr_size = oopDesc::header_size();
328 Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
329 #endif // ASSERT
330 }
331 thread->tlab().fill(obj, obj + size, new_tlab_size);
332 return obj;
333 }
334
335 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
336 MemRegion deferred = thread->deferred_card_mark();
337 if (!deferred.is_empty()) {
338 assert(_defer_initial_card_mark, "Otherwise should be empty");
339 {
340 // Verify that the storage points to a parsable object in heap
341 DEBUG_ONLY(oop old_obj = oop(deferred.start());)
342 assert(is_in(old_obj), "Not in allocated heap");
343 assert(!can_elide_initializing_store_barrier(old_obj),
344 "Else should have been filtered in new_store_pre_barrier()");
345 assert(oopDesc::is_oop(old_obj, true), "Not an oop");
346 assert(deferred.word_size() == (size_t)(old_obj->size()),
347 "Mismatch: multiple objects?");
348 }
349 BarrierSet* bs = barrier_set();
350 bs->write_region(deferred);
351 // "Clear" the deferred_card_mark field
|
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "gc/shared/allocTracer.hpp"
28 #include "gc/shared/barrierSet.inline.hpp"
29 #include "gc/shared/collectedHeap.hpp"
30 #include "gc/shared/collectedHeap.inline.hpp"
31 #include "gc/shared/gcHeapSummary.hpp"
32 #include "gc/shared/gcTrace.hpp"
33 #include "gc/shared/gcTraceTime.inline.hpp"
34 #include "gc/shared/gcWhen.hpp"
35 #include "gc/shared/vmGCOperations.hpp"
36 #include "logging/log.hpp"
37 #include "memory/metaspace.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "oops/instanceMirrorKlass.hpp"
40 #include "oops/oop.inline.hpp"
41 #include "runtime/heapMonitoring.hpp"
42 #include "runtime/init.hpp"
43 #include "runtime/thread.inline.hpp"
44 #include "services/heapDumper.hpp"
45 #include "utilities/align.hpp"
46
47
#ifdef ASSERT
// Debug-only counter; NOTE(review): presumably tracks simulated OOM firings
// for testing -- confirm against CollectedHeap::fire_out_of_memory().
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

// Maximum size (in HeapWords) of a filler array; 0 until initialized
// elsewhere during heap setup.
size_t CollectedHeap::_filler_array_max_size = 0;
53
// Print one GC heap log entry: a tag line saying whether the snapshot was
// taken before or after the GC, followed by the raw captured message text.
template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}
59
60 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
61 if (!should_log()) {
272
#ifdef ASSERT
// Debug-only sanity check run before a heap allocation: the current thread
// must not carry a pending exception, and (under StrictSafepointChecks) it
// must be in a state where allocation -- and therefore a safepoint -- is
// permitted.
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif
291
292
// Report a completed allocation to the heap-monitoring sampler and schedule
// the next sampling point on this thread's TLAB.
//   thread           - thread that performed the allocation
//   obj              - start address of the newly allocated object
//   size             - object size in HeapWords (converted to bytes below)
//   overflowed_words - words allocated beyond the previous sampling point
//                      (see the pointer_delta computation in the caller),
//                      used to correct the next sample interval
void CollectedHeap::sample_allocation(Thread* thread, HeapWord* obj,
                                      size_t size, size_t overflowed_words) {
  // The object is fully allocated; hand it to the sampler now.
  HeapMonitoring::object_alloc_do_sample(thread,
                                         reinterpret_cast<oopDesc*>(obj),
                                         size * HeapWordSize);
  // The allocation succeeded, so pick the next sample point, compensating
  // for any words allocated past the previous one.
  thread->tlab().pick_next_sample(overflowed_words);
}
302
// Retry a TLAB allocation of 'size' HeapWords after restoring the TLAB's
// actual end. NOTE(review): presumably the sampler had artificially lowered
// the TLAB end to force the slow path here -- confirm against
// ThreadLocalAllocBuffer::set_back_actual_end(). Returns NULL if the TLAB
// still lacks space.
HeapWord* CollectedHeap::allocate_sampled_object(Thread* thread, size_t size) {
  thread->tlab().set_back_actual_end();

  // The tlab could still have space after this sample.
  return thread->tlab().allocate(size);
}
309
310 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
311 // In case the tlab changes, remember if this one wanted a sample.
312 bool should_sample = HeapMonitoring::enabled() && thread->tlab().should_sample();
313
314 HeapWord* obj = NULL;
315 if (should_sample) {
316 // Remember the tlab end to fix up the sampling rate.
317 HeapWord *tlab_old_end = thread->tlab().end();
318 obj = allocate_sampled_object(thread, size);
319
320 // If we did allocate in this tlab, sample it. Otherwise, we wait for the
321 // new tlab's first allocation at the end of this method.
322 if (obj != NULL) {
323 // Fix sample rate by removing the extra words allocated in this last
324 // sample.
325 size_t overflowed_words = pointer_delta(thread->tlab().top(), tlab_old_end);
326 sample_allocation(thread, obj, size, overflowed_words);
327 return obj;
328 }
329 }
330
331 // Retain tlab and allocate object in shared space if
332 // the amount free in the tlab is too large to discard.
333 if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
334 thread->tlab().record_slow_allocation(size);
335 return NULL;
336 }
337
338 // Discard tlab and allocate a new one.
339 // To minimize fragmentation, the last TLAB may be smaller than the rest.
340 size_t new_tlab_size = thread->tlab().compute_size(size);
341
342 thread->tlab().clear_before_allocation();
343
344 if (new_tlab_size == 0) {
345 return NULL;
346 }
347
348 // Allocate a new TLAB...
349 obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
350 if (obj == NULL) {
351 return NULL;
352 }
353
354 AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
355
356 if (ZeroTLAB) {
357 // ..and clear it.
358 Copy::zero_to_words(obj, new_tlab_size);
359 } else {
360 // ...and zap just allocated object.
361 #ifdef ASSERT
362 // Skip mangling the space corresponding to the object header to
363 // ensure that the returned space is not considered parsable by
364 // any concurrent GC thread.
365 size_t hdr_size = oopDesc::header_size();
366 Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
367 #endif // ASSERT
368 }
369 thread->tlab().fill(obj, obj + size, new_tlab_size);
370
371 // Did we initially want to sample?
372 if (should_sample) {
373 sample_allocation(thread, obj, size);
374 }
375 return obj;
376 }
377
378 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
379 MemRegion deferred = thread->deferred_card_mark();
380 if (!deferred.is_empty()) {
381 assert(_defer_initial_card_mark, "Otherwise should be empty");
382 {
383 // Verify that the storage points to a parsable object in heap
384 DEBUG_ONLY(oop old_obj = oop(deferred.start());)
385 assert(is_in(old_obj), "Not in allocated heap");
386 assert(!can_elide_initializing_store_barrier(old_obj),
387 "Else should have been filtered in new_store_pre_barrier()");
388 assert(oopDesc::is_oop(old_obj, true), "Not an oop");
389 assert(deferred.word_size() == (size_t)(old_obj->size()),
390 "Mismatch: multiple objects?");
391 }
392 BarrierSet* bs = barrier_set();
393 bs->write_region(deferred);
394 // "Clear" the deferred_card_mark field
|