
src/hotspot/share/gc/shared/collectedHeap.cpp

rev 47590 : [mq]: heap8
rev 47591 : [mq]: heap10a
rev 47592 : [mq]: heap14_rebased


 274 void CollectedHeap::check_for_valid_allocation_state() {
 275   Thread *thread = Thread::current();
 276   // How to choose between a pending exception and a potential
 277   // OutOfMemoryError?  Don't allow pending exceptions.
 278   // This is a VM policy failure, so how do we exhaustively test it?
 279   assert(!thread->has_pending_exception(),
 280          "shouldn't be allocating with pending exception");
 281   if (StrictSafepointChecks) {
 282     assert(thread->allow_allocation(),
 283            "Allocation done by thread for which allocation is blocked "
 284            "by No_Allocation_Verifier!");
 285     // Allocation of an oop can always invoke a safepoint;
 286     // hence the true argument.
 287     thread->check_for_valid_safepoint_state(true);
 288   }
 289 }
 290 #endif
 291 
 292 
 293 void CollectedHeap::sample_allocation(Thread* thread, HeapWord* obj,
 294                                       size_t size, size_t fix_sample_rate) {
 295   // The object is allocated; sample it now.
 296   HeapMonitoring::object_alloc_do_sample(thread,
 297                                          reinterpret_cast<oopDesc*>(obj),
 298                                          size * HeapWordSize);
 299   // Pick the next sample point, since this allocation succeeded.
 300   thread->tlab().pick_next_sample(fix_sample_rate);
 301 }
 302 
 303 HeapWord* CollectedHeap::allocate_sampled_object(Thread* thread, size_t size) {
 304   thread->tlab().set_back_actual_end();
 305 
 306   // The tlab could still have space after this sample.
 307   return thread->tlab().allocate(size);
 308 }
 309 
 310 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 311   // The tlab may be replaced below; remember if this one wanted a sample.
 312   bool should_sample = thread->tlab().should_sample() && HeapMonitoring::enabled();
 313 
 314   HeapWord* obj = NULL;
 315   if (should_sample) {
 316     // Remember the tlab end to fix up the sampling rate.
 317     HeapWord *tlab_old_end = thread->tlab().end();
 318     obj = allocate_sampled_object(thread, size);
 319 
 320     // If we did allocate in this tlab, sample it. Otherwise, we wait for the
 321     // new tlab's first allocation at the end of this method.
 322     if (obj != NULL) {
 323       // Fix sample rate by removing the extra bytes allocated in this last
 324       // sample.
 325       size_t fix_sample_rate = thread->tlab().top() - tlab_old_end;
 326       sample_allocation(thread, obj, size, fix_sample_rate);
 327       return obj;
 328     }
 329   }
 330 
 331   // Retain tlab and allocate object in shared space if
 332   // the amount free in the tlab is too large to discard.
 333   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 334     thread->tlab().record_slow_allocation(size);
 335     return NULL;
 336   }
 337 
 338   // Discard tlab and allocate a new one.
 339   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 340   size_t new_tlab_size = thread->tlab().compute_size(size);
 341 
 342   thread->tlab().clear_before_allocation();
 343 
 344   if (new_tlab_size == 0) {
 345     return NULL;
 346   }




 274 void CollectedHeap::check_for_valid_allocation_state() {
 275   Thread *thread = Thread::current();
 276   // How to choose between a pending exception and a potential
 277   // OutOfMemoryError?  Don't allow pending exceptions.
 278   // This is a VM policy failure, so how do we exhaustively test it?
 279   assert(!thread->has_pending_exception(),
 280          "shouldn't be allocating with pending exception");
 281   if (StrictSafepointChecks) {
 282     assert(thread->allow_allocation(),
 283            "Allocation done by thread for which allocation is blocked "
 284            "by No_Allocation_Verifier!");
 285     // Allocation of an oop can always invoke a safepoint;
 286     // hence the true argument.
 287     thread->check_for_valid_safepoint_state(true);
 288   }
 289 }
 290 #endif
 291 
 292 
 293 void CollectedHeap::sample_allocation(Thread* thread, HeapWord* obj,
 294                                       size_t size, size_t overflowed_words) {
 295   // The object is allocated; sample it now.
 296   HeapMonitoring::object_alloc_do_sample(thread,
 297                                          reinterpret_cast<oopDesc*>(obj),
 298                                          size * HeapWordSize);
 299   // Pick the next sample point, since this allocation succeeded.
 300   thread->tlab().pick_next_sample(overflowed_words);
 301 }
 302 
 303 HeapWord* CollectedHeap::allocate_sampled_object(Thread* thread, size_t size) {
 304   thread->tlab().set_back_actual_end();
 305 
 306   // The tlab could still have space after this sample.
 307   return thread->tlab().allocate(size);
 308 }
 309 
 310 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 311   // The tlab may be replaced below; remember if this one wanted a sample.
 312   bool should_sample = HeapMonitoring::enabled() && thread->tlab().should_sample();
 313 
 314   HeapWord* obj = NULL;
 315   if (should_sample) {
 316     // Remember the tlab end to fix up the sampling rate.
 317     HeapWord *tlab_old_end = thread->tlab().end();
 318     obj = allocate_sampled_object(thread, size);
 319 
 320     // If we did allocate in this tlab, sample it. Otherwise, we wait for the
 321     // new tlab's first allocation at the end of this method.
 322     if (obj != NULL) {
 323       // Fix sample rate by removing the extra words allocated in this last
 324       // sample.
 325       size_t overflowed_words = pointer_delta(thread->tlab().top(), tlab_old_end);
 326       sample_allocation(thread, obj, size, overflowed_words);
 327       return obj;
 328     }
 329   }
 330 
 331   // Retain tlab and allocate object in shared space if
 332   // the amount free in the tlab is too large to discard.
 333   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 334     thread->tlab().record_slow_allocation(size);
 335     return NULL;
 336   }
 337 
 338   // Discard tlab and allocate a new one.
 339   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 340   size_t new_tlab_size = thread->tlab().compute_size(size);
 341 
 342   thread->tlab().clear_before_allocation();
 343 
 344   if (new_tlab_size == 0) {
 345     return NULL;
 346   }
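In the patched copy above, the overflow past the sampled TLAB end is computed with pointer_delta() rather than raw pointer subtraction, and the result is renamed overflowed_words to match its unit. Subtracting two HeapWord* values yields a signed ptrdiff_t, which would wrap to a huge value once stored in a size_t if the operands were ever reversed; HotSpot's pointer_delta() (utilities/globalDefinitions.hpp) returns the distance as an unsigned element count and makes the intent explicit. A minimal standalone sketch of the idea, where word_delta is a hypothetical stand-in for the real helper, not the HotSpot function itself:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for HotSpot's HeapWord: one machine word of heap memory.
    typedef uintptr_t HeapWord;

    // Hypothetical helper modeled on pointer_delta(): the distance from
    // 'right' up to 'left', in words, as an unsigned size_t. Raw pointer
    // subtraction yields a signed ptrdiff_t, which would wrap to a huge
    // unsigned value if the operands were reversed; the assert makes
    // that bug fail fast instead.
    static size_t word_delta(const HeapWord* left, const HeapWord* right) {
      assert(left >= right && "avoid unsigned underflow");
      return (size_t)(left - right);
    }

    int main() {
      HeapWord tlab[16];
      HeapWord* old_end = &tlab[4];  // TLAB end recorded before the sample
      HeapWord* top     = &tlab[9];  // tlab().top() after the allocation
      printf("overflowed_words = %zu\n", word_delta(top, old_end));  // 5
      return 0;
    }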

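The comment in the patched copy notes that the sampling rate is fixed up by removing the words that overflowed past the last sample point. A hypothetical model of how pick_next_sample() could use that value (the real implementation lives in ThreadLocalAllocBuffer and may differ):

    #include <cstddef>
    #include <cstdio>

    // Hypothetical model of scheduling the next sample point: place it
    // one sampling interval ahead, minus the words the last allocation
    // already overshot the previous sample point by, so the average
    // spacing between samples stays at the configured interval.
    static size_t next_sample_offset(size_t interval_words,
                                     size_t overflowed_words) {
      if (overflowed_words >= interval_words) {
        return 0;  // overshot a whole interval: sample again immediately
      }
      return interval_words - overflowed_words;
    }

    int main() {
      // A 512-word interval with a 40-word overshoot schedules the next
      // sample only 472 words ahead.
      printf("next sample in %zu words\n", next_sample_offset(512, 40));
      return 0;
    }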

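The tail of allocate_from_tlab_slow() is the standard TLAB slow-path policy: keep the current TLAB and satisfy the allocation from the shared heap while the free space left in it exceeds the refill-waste limit; otherwise discard it and size a replacement (possibly smaller, to limit fragmentation). A small self-contained model of that decision, using hypothetical names for illustration:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical model of the retain-vs-discard decision: a TLAB with
    // more free space than the refill-waste limit is too expensive to
    // throw away, so it is kept and the object goes to the shared heap.
    struct TlabModel {
      size_t free_words;          // words still unused in the TLAB
      size_t refill_waste_words;  // most free words we may discard
    };

    static bool retain_tlab(const TlabModel& t) {
      return t.free_words > t.refill_waste_words;
    }

    int main() {
      TlabModel nearly_full = {8, 64};    // little left: discard and refill
      TlabModel half_empty  = {512, 64};  // plenty left: retain it
      printf("nearly full -> %s\n",
             retain_tlab(nearly_full) ? "retain" : "discard");
      printf("half empty  -> %s\n",
             retain_tlab(half_empty)  ? "retain" : "discard");
      return 0;
    }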