
src/hotspot/share/gc/shared/collectedHeap.cpp

rev 49945 : imported patch 8191471-g1-varying-tlab-allocation
rev 49946 : imported patch 8191471-g1-retained-mutator-region
rev 49949 : imported patch 8191471-tschatzl-comments-open
rev 49950 : [mq]: 8191471-pliden-comments


 367 
 368 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 369 
 370   // Retain tlab and allocate object in shared space if
 371   // the amount free in the tlab is too large to discard.
 372   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 373     thread->tlab().record_slow_allocation(size);
 374     return NULL;
 375   }
 376 
 377   // Discard tlab and allocate a new one.
 378   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 379   size_t new_tlab_size = thread->tlab().compute_size(size);
 380 
 381   thread->tlab().clear_before_allocation();
 382 
 383   if (new_tlab_size == 0) {
 384     return NULL;
 385   }
 386 
 387   // Allocate a new TLAB...
 388   HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
 389   if (obj == NULL) {
 390     return NULL;
 391   }
 392 
 393   AllocTracer::send_allocation_in_new_tlab(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread);
 394 
 395   if (ZeroTLAB) {
 396     // ...and clear it.
 397     Copy::zero_to_words(obj, new_tlab_size);
 398   } else {
 399     // ...and zap the just-allocated object.
 400 #ifdef ASSERT
 401     // Skip mangling the space corresponding to the object header to
 402     // ensure that the returned space is not considered parsable by
 403     // any concurrent GC thread.
 404     size_t hdr_size = oopDesc::header_size();
 405     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 406 #endif // ASSERT
 407   }
 408   thread->tlab().fill(obj, obj + size, new_tlab_size);
 409   return obj;
 410 }
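The retain-or-discard decision at the top of allocate_from_tlab_slow() can be modeled on its own. The sketch below is illustrative only; slow_path_action(), free_words and refill_waste_limit_words are hypothetical stand-ins for thread->tlab().free() and thread->tlab().refill_waste_limit(), not code from this file:

#include <cstddef>

// Hypothetical model of the decision above, not HotSpot code.
enum class SlowPathAction { RetainTlabAllocateShared, RefillTlab };

// If the space still left in the TLAB is larger than the waste we are
// willing to throw away, keep the TLAB and satisfy this allocation from
// the shared space; otherwise retire the TLAB and request a new one.
SlowPathAction slow_path_action(size_t free_words, size_t refill_waste_limit_words) {
  return (free_words > refill_waste_limit_words)
      ? SlowPathAction::RetainTlabAllocateShared
      : SlowPathAction::RefillTlab;
}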
 411 
 412 size_t CollectedHeap::max_tlab_size() const {
 413   // TLABs can't be bigger than what we can fill with an int[Integer.MAX_VALUE].
 414   // This restriction could be removed by enabling filling with multiple arrays.
 415   // If we compute that the reasonable way as
 416   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
 417   // we'll overflow on the multiply, so we do the divide first.
 418   // We actually lose a little by dividing first,
 419   // but that just makes the TLAB somewhat smaller than the biggest array,
 420   // which is fine, since we'll be able to fill that.
 421   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 422               sizeof(jint) *
 423               ((juint) max_jint / (size_t) HeapWordSize);
 424   return align_down(max_int_size, MinObjAlignment);
 425 }
 426 
 427 size_t CollectedHeap::filler_array_hdr_size() {
 428   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long


 489 void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
 490 {
 491   DEBUG_ONLY(fill_args_check(start, words);)
 492   HandleMark hm;  // Free handles before leaving.
 493 
 494   // Multiple objects may be required depending on the filler array maximum size. Fill
 495   // the range up to that limit with objects that are filler_array_max_size sized. The
 496   // remainder is filled with a single object.
 497   const size_t min = min_fill_size();
 498   const size_t max = filler_array_max_size();
 499   while (words > max) {
 500     const size_t cur = (words - max) >= min ? max : max - min;
 501     fill_with_array(start, cur, zap);
 502     start += cur;
 503     words -= cur;
 504   }
 505 
 506   fill_with_object_impl(start, words, zap);
 507 }
 508 
 509 HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
 510   guarantee(false, "thread-local allocation buffers not supported");
 511   return NULL;
 512 }
 513 
 514 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
 515   // The second disjunct in the assertion below makes a concession
 516   // for the start-up verification done while the VM is being
 517   // created. Callers should be careful that they know that mutators
 518   // aren't going to interfere -- for instance, this is permissible
 519   // if we are still single-threaded and have either not yet
 520   // started allocating (nothing much to verify) or we have
 521   // started allocating but are now a full-fledged JavaThread
 522   // (and have thus made our TLABs available for filling).
 523   assert(SafepointSynchronize::is_at_safepoint() ||
 524          !is_init_completed(),
 525          "Should only be called at a safepoint or at start-up,"
 526          " otherwise concurrent mutator activity may make the heap"
 527          " unparsable again");
 528   const bool use_tlab = UseTLAB;
 529   // The main thread starts allocating via a TLAB even before it




 367 
 368 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 369 
 370   // Retain tlab and allocate object in shared space if
 371   // the amount free in the tlab is too large to discard.
 372   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 373     thread->tlab().record_slow_allocation(size);
 374     return NULL;
 375   }
 376 
 377   // Discard tlab and allocate a new one.
 378   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 379   size_t new_tlab_size = thread->tlab().compute_size(size);
 380 
 381   thread->tlab().clear_before_allocation();
 382 
 383   if (new_tlab_size == 0) {
 384     return NULL;
 385   }
 386 
 387   // Allocate a new TLAB requesting new_tlab_size. Any size
 388   // between the minimum size and new_tlab_size is accepted.
 389   size_t actual_tlab_size = 0;
 390   size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(size);
 391   HeapWord* obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);
 392   if (obj == NULL) {
 393     assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
 394            min_tlab_size, new_tlab_size, actual_tlab_size);
 395     return NULL;
 396   }
 397   assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
 398          p2i(obj), min_tlab_size, new_tlab_size);
 399 
 400   AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, thread);
 401 
 402   if (ZeroTLAB) {
 403     // ...and clear it.
 404     Copy::zero_to_words(obj, actual_tlab_size);
 405   } else {
 406     // ...and zap the just-allocated object.
 407 #ifdef ASSERT
 408     // Skip mangling the space corresponding to the object header to
 409     // ensure that the returned space is not considered parsable by
 410     // any concurrent GC thread.
 411     size_t hdr_size = oopDesc::header_size();
 412     Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
 413 #endif // ASSERT
 414   }
 415   thread->tlab().fill(obj, obj + size, actual_tlab_size);
 416   return obj;
 417 }
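The code above now asks for a TLAB anywhere in the [min_tlab_size, new_tlab_size] range and reads the granted size back through actual_tlab_size. A minimal sketch of how an override of the three-argument allocate_new_tlab() might honor that contract is shown below; SketchHeap, _alloc_space and par_allocate_up_to() are hypothetical names used only for illustration and are not part of this patch:

// Hypothetical override, illustrating the contract only (not from the patch).
HeapWord* SketchHeap::allocate_new_tlab(size_t min_size,
                                        size_t requested_size,
                                        size_t* actual_size) {
  assert(min_size <= requested_size, "sanity");
  // Prefer requested_size words, but accept any size down to min_size,
  // e.g. when the current allocation region cannot fit the full request.
  size_t granted = 0;
  HeapWord* result = _alloc_space->par_allocate_up_to(min_size, requested_size, &granted);
  if (result == NULL) {
    // On failure *actual_size is left untouched; the caller asserts it is still 0.
    return NULL;
  }
  assert(granted >= min_size && granted <= requested_size, "granted size out of range");
  *actual_size = granted;  // used by the caller for zeroing/zapping and tlab().fill()
  return result;
}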
 418 
 419 size_t CollectedHeap::max_tlab_size() const {
 420   // TLABs can't be bigger than what we can fill with an int[Integer.MAX_VALUE].
 421   // This restriction could be removed by enabling filling with multiple arrays.
 422   // If we compute that the reasonable way as
 423   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
 424   // we'll overflow on the multiply, so we do the divide first.
 425   // We actually lose a little by dividing first,
 426   // but that just makes the TLAB somewhat smaller than the biggest array,
 427   // which is fine, since we'll be able to fill that.
 428   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 429               sizeof(jint) *
 430               ((juint) max_jint / (size_t) HeapWordSize);
 431   return align_down(max_int_size, MinObjAlignment);
 432 }
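As a worked illustration of the divide-before-multiply comment above (a standalone snippet, not part of the patch, assuming sizeof(jint) == 4 and HeapWordSize == 8):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t max_jint       = 2147483647;  // Integer.MAX_VALUE
  const uint64_t heap_word_size = 8;           // assumed 64-bit HeapWord
  const uint64_t jint_size      = 4;

  // Divide first, as max_tlab_size() does: the intermediate values stay small.
  uint64_t divide_first   = jint_size * (max_jint / heap_word_size);   // 1073741820 words
  // Multiply first: the intermediate 4 * max_jint (~8.6e9) would not fit in a
  // 32-bit size_t, which is why the code avoids this ordering.
  uint64_t multiply_first = (jint_size * max_jint) / heap_word_size;   // 1073741823 words

  // Dividing first gives up only 3 words relative to the exact value.
  printf("divide first:   %llu\n", (unsigned long long) divide_first);
  printf("multiply first: %llu\n", (unsigned long long) multiply_first);
  return 0;
}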
 433 
 434 size_t CollectedHeap::filler_array_hdr_size() {
 435   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long


 496 void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
 497 {
 498   DEBUG_ONLY(fill_args_check(start, words);)
 499   HandleMark hm;  // Free handles before leaving.
 500 
 501   // Multiple objects may be required depending on the filler array maximum size. Fill
 502   // the range up to that limit with objects that are filler_array_max_size sized. The
 503   // remainder is filled with a single object.
 504   const size_t min = min_fill_size();
 505   const size_t max = filler_array_max_size();
 506   while (words > max) {
 507     const size_t cur = (words - max) >= min ? max : max - min;
 508     fill_with_array(start, cur, zap);
 509     start += cur;
 510     words -= cur;
 511   }
 512 
 513   fill_with_object_impl(start, words, zap);
 514 }
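The loop above guarantees that the final chunk handed to fill_with_object_impl() never drops below min_fill_size(). A small standalone model of just the splitting arithmetic (split_fill() is a hypothetical helper, with plain numbers in place of the heap's real limits) makes that easier to see:

#include <cstddef>
#include <cstdio>
#include <vector>

// Mirrors the loop in fill_with_objects(): carve 'words' into chunks of at
// most 'max', shrinking a chunk by 'min' when the remainder would otherwise
// drop below 'min', so the final chunk always stays fillable.
std::vector<size_t> split_fill(size_t words, size_t max, size_t min) {
  std::vector<size_t> chunks;
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    chunks.push_back(cur);
    words -= cur;
  }
  chunks.push_back(words);  // corresponds to the final fill_with_object_impl() call
  return chunks;
}

int main() {
  // With words = 201, max = 100, min = 2 the chunks are 100, 98, 3:
  // the second chunk is shrunk so the tail (3) does not fall below min.
  for (size_t chunk : split_fill(201, 100, 2)) {
    printf("%zu ", chunk);
  }
  printf("\n");
  return 0;
}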
 515 
 516 HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
 517                                            size_t requested_size,
 518                                            size_t* actual_size) {
 519   guarantee(false, "thread-local allocation buffers not supported");
 520   return NULL;
 521 }
 522 
 523 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
 524   // The second disjunct in the assertion below makes a concession
 525   // for the start-up verification done while the VM is being
 526   // created. Callers should be careful that they know that mutators
 527   // aren't going to interfere -- for instance, this is permissible
 528   // if we are still single-threaded and have either not yet
 529   // started allocating (nothing much to verify) or we have
 530   // started allocating but are now a full-fledged JavaThread
 531   // (and have thus made our TLABs available for filling).
 532   assert(SafepointSynchronize::is_at_safepoint() ||
 533          !is_init_completed(),
 534          "Should only be called at a safepoint or at start-up,"
 535          " otherwise concurrent mutator activity may make the heap"
 536          " unparsable again");
 537   const bool use_tlab = UseTLAB;
 538   // The main thread starts allocating via a TLAB even before it

