src/hotspot/share/gc/shared/collectedHeap.cpp

rev 49851 : imported patch 8191471-g1-varying-tlab-allocation
rev 49852 : imported patch 8191471-g1-retained-mutator-region


 351 
 352 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 353 
 354   // Retain the TLAB and allocate the object in the shared space if
 355   // the amount free in the TLAB is too large to discard.
 356   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 357     thread->tlab().record_slow_allocation(size);
 358     return NULL;
 359   }
 360 
 361   // Discard the TLAB and allocate a new one.
 362   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 363   size_t new_tlab_size = thread->tlab().compute_size(size);
 364 
 365   thread->tlab().clear_before_allocation();
 366 
 367   if (new_tlab_size == 0) {
 368     return NULL;
 369   }
 370 
 371   // Allocate a new TLAB...
 372   HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
 373   if (obj == NULL) {
 374     return NULL;
 375   }
 376 
 377   AllocTracer::send_allocation_in_new_tlab(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread);
 378 
 379   if (ZeroTLAB) {
 380     // ...and clear it.
 381     Copy::zero_to_words(obj, new_tlab_size);
 382   } else {
 383     // ...and zap just allocated object.
 384 #ifdef ASSERT
 385     // Skip mangling the space corresponding to the object header to
 386     // ensure that the returned space is not considered parsable by
 387     // any concurrent GC thread.
 388     size_t hdr_size = oopDesc::header_size();
 389     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 390 #endif // ASSERT
 391   }
 392   thread->tlab().fill(obj, obj + size, new_tlab_size);
 393   return obj;
 394 }
 395 
 396 size_t CollectedHeap::max_tlab_size() const {
 397   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
 398   // This restriction could be removed by enabling filling with multiple arrays.
 399   // If we compute that the reasonable way, as
 400   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize),
 401   // we'll overflow on the multiply, so we do the divide first.
 402   // We actually lose a little by dividing first,
 403   // but that just makes the TLAB somewhat smaller than the biggest array,
 404   // which is fine, since we'll be able to fill that.
 405   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 406               sizeof(jint) *
 407               ((juint) max_jint / (size_t) HeapWordSize);
 408   return align_down(max_int_size, MinObjAlignment);
 409 }
 410 
 411 size_t CollectedHeap::filler_array_hdr_size() {
 412   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long


 473 void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
 474 {
 475   DEBUG_ONLY(fill_args_check(start, words);)
 476   HandleMark hm;  // Free handles before leaving.
 477 
 478   // Multiple objects may be required depending on the filler array maximum size. Fill
 479   // the range up to that with objects that are filler_array_max_size sized. The
 480   // remainder is filled with a single object.
 481   const size_t min = min_fill_size();
 482   const size_t max = filler_array_max_size();
 483   while (words > max) {
 484     const size_t cur = (words - max) >= min ? max : max - min;
 485     fill_with_array(start, cur, zap);
 486     start += cur;
 487     words -= cur;
 488   }
 489 
 490   fill_with_object_impl(start, words, zap);
 491 }
 492 
 493 HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
 494   guarantee(false, "thread-local allocation buffers not supported");
 495   return NULL;
 496 }
 497 
 498 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
 499   // The second disjunct in the assertion below makes a concession
 500   // for the start-up verification done while the VM is being
 501   // created. Callers should be careful that they know that mutators
 502   // aren't going to interfere -- for instance, this is permissible
 503   // if we are still single-threaded and have either not yet
 504   // started allocating (nothing much to verify) or we have
 505   // started allocating but are now a full-fledged JavaThread
 506   // (and have thus made our TLABs available for filling).
 507   assert(SafepointSynchronize::is_at_safepoint() ||
 508          !is_init_completed(),
 509          "Should only be called at a safepoint or at start-up,"
 510          " otherwise concurrent mutator activity may make the heap"
 511          " unparsable again");
 512   const bool use_tlab = UseTLAB;
 513   // The main thread starts allocating via a TLAB even before it




 351 
 352 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 353 
 354   // Retain the TLAB and allocate the object in the shared space if
 355   // the amount free in the TLAB is too large to discard.
 356   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 357     thread->tlab().record_slow_allocation(size);
 358     return NULL;
 359   }
 360 
 361   // Discard the TLAB and allocate a new one.
 362   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 363   size_t new_tlab_size = thread->tlab().compute_size(size);
 364 
 365   thread->tlab().clear_before_allocation();
 366 
 367   if (new_tlab_size == 0) {
 368     return NULL;
 369   }
 370 
 371   // Allocate a new TLAB requesting new_tlab_size. Any size
 372   // between minimal_tlab_size and new_tlab_size is accepted.
 373   size_t actual_tlab_size = 0;
 374   size_t minimal_tlab_size = MAX2(ThreadLocalAllocBuffer::compute_min_size(size), MinTLABSize);
 375   HeapWord* obj = Universe::heap()->allocate_new_tlab(minimal_tlab_size, new_tlab_size, &actual_tlab_size);
 376   if (obj == NULL) {
 377     return NULL;
 378   }
 379   assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
 380          p2i(obj), minimal_tlab_size, new_tlab_size);
 381 
 382   AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, thread);
 383 
 384   if (ZeroTLAB) {
 385     // ...and clear it.
 386     Copy::zero_to_words(obj, actual_tlab_size);
 387   } else {
 388     // ...and zap just allocated object.
 389 #ifdef ASSERT
 390     // Skip mangling the space corresponding to the object header to
 391     // ensure that the returned space is not considered parsable by
 392     // any concurrent GC thread.
 393     size_t hdr_size = oopDesc::header_size();
 394     Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
 395 #endif // ASSERT
 396   }
 397   thread->tlab().fill(obj, obj + size, actual_tlab_size);
 398   return obj;
 399 }
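
The retain-vs-discard decision at the top of allocate_from_tlab_slow() can be illustrated with a small standalone sketch. The sizes below are made-up word counts for illustration only; the real limits come from ThreadLocalAllocBuffer and flags such as TLABWasteTargetPercent. A TLAB is only retired when the space left in it is small enough to waste:

#include <cstdio>
#include <cstddef>

// Standalone sketch of the retain-vs-discard heuristic above.
// All sizes are hypothetical word counts, not HotSpot's real values.
int main() {
  const size_t tlab_size          = 4096;            // current TLAB size
  const size_t refill_waste_limit = tlab_size / 64;  // 64 words may be wasted
  const size_t free_words         = 200;             // space left in the TLAB

  if (free_words > refill_waste_limit) {
    // Too much space would be thrown away: keep the TLAB and
    // allocate this object directly in the shared eden space.
    printf("retain TLAB, allocate in shared space\n");
  } else {
    // Cheap to discard: retire the TLAB and request a new one.
    printf("discard TLAB and refill\n");
  }
  return 0;
}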
 400 
 401 size_t CollectedHeap::max_tlab_size() const {
 402   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
 403   // This restriction could be removed by enabling filling with multiple arrays.
 404   // If we compute that the reasonable way, as
 405   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize),
 406   // we'll overflow on the multiply, so we do the divide first.
 407   // We actually lose a little by dividing first,
 408   // but that just makes the TLAB somewhat smaller than the biggest array,
 409   // which is fine, since we'll be able to fill that.
 410   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 411               sizeof(jint) *
 412               ((juint) max_jint / (size_t) HeapWordSize);
 413   return align_down(max_int_size, MinObjAlignment);
 414 }
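
As a worked check of the divide-before-multiply comment above, assuming a 64-bit VM where HeapWordSize is 8 and sizeof(jint) is 4 (the header term is omitted for brevity):

#include <cstdio>
#include <cstddef>
#include <cstdint>

// Worked example of the overflow that max_tlab_size() avoids.
// Assumes HeapWordSize == 8 (64-bit VM).
int main() {
  const uint32_t max_jint = 2147483647u;   // Integer.MAX_VALUE

  // Naive order in 32-bit arithmetic: 4 * 2147483647 = 8589934588,
  // which does not fit in a juint, so the multiply would wrap.
  // Dividing first stays comfortably in range:
  size_t words = sizeof(uint32_t) * (size_t)(max_jint / 8);
  // 2147483647 / 8 = 268435455, then * 4 = 1073741820 words.
  printf("max int[] payload: %zu words\n", words);
  return 0;
}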
 415 
 416 size_t CollectedHeap::filler_array_hdr_size() {
 417   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long


 478 void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
 479 {
 480   DEBUG_ONLY(fill_args_check(start, words);)
 481   HandleMark hm;  // Free handles before leaving.
 482 
 483   // Multiple objects may be required depending on the filler array maximum size. Fill
 484   // the range up to that with objects that are filler_array_max_size sized. The
 485   // remainder is filled with a single object.
 486   const size_t min = min_fill_size();
 487   const size_t max = filler_array_max_size();
 488   while (words > max) {
 489     const size_t cur = (words - max) >= min ? max : max - min;
 490     fill_with_array(start, cur, zap);
 491     start += cur;
 492     words -= cur;
 493   }
 494 
 495   fill_with_object_impl(start, words, zap);
 496 }
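
The loop above guarantees that the final remainder can still hold a filler object: if taking a full max-sized chunk would leave fewer than min words behind, it takes max - min instead. A standalone trace with made-up sizes (not the real HotSpot limits):

#include <cstdio>
#include <cstddef>

// Standalone trace of the chunking loop in fill_with_objects().
int main() {
  const size_t min = 2;    // minimum size of a fillable object
  const size_t max = 100;  // filler array maximum size
  size_t words = 201;      // words in the range to fill

  while (words > max) {
    // Take a full chunk unless that would strand fewer than min words.
    const size_t cur = (words - max) >= min ? max : max - min;
    printf("fill_with_array: %zu words\n", cur);
    words -= cur;
  }
  printf("fill_with_object_impl: %zu words\n", words);  // always >= min
  return 0;
}

With words = 201 this fills chunks of 100, then 98 (avoiding a stranded 1-word remainder), and a final object of 3 words.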
 497 
 498 HeapWord* CollectedHeap::allocate_new_tlab(size_t min_word_size,
 499                                            size_t desired_word_size,
 500                                            size_t* actual_word_size) {
 501   guarantee(false, "thread-local allocation buffers not supported");
 502   return NULL;
 503 }
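
The patched signature lets a collector return any TLAB between min_word_size and desired_word_size, reporting the granted size through actual_word_size. Below is a standalone sketch of that contract, assuming a single hypothetical region; region_free and the size-returning interface are illustrative stand-ins, not HotSpot code:

#include <cstdio>
#include <cstddef>
#include <algorithm>

// Hypothetical model of the min/desired/actual TLAB contract.
static size_t region_free = 600;  // words left in the allocation region

// Returns a TLAB size in [min_word_size, desired_word_size], or 0 when
// even the minimum cannot be satisfied (caller falls back to shared space).
size_t allocate_new_tlab(size_t min_word_size, size_t desired_word_size,
                         size_t* actual_word_size) {
  if (region_free < min_word_size) {
    return 0;
  }
  // Grant the desired size if available, otherwise shrink the TLAB to
  // what is left: anything at or above min_word_size is acceptable.
  size_t size = std::min(desired_word_size, region_free);
  region_free -= size;
  *actual_word_size = size;  // report what was actually handed out
  return size;
}

int main() {
  size_t actual = 0;
  if (allocate_new_tlab(64, 1024, &actual) != 0) {
    printf("granted a %zu-word TLAB (desired 1024, min 64)\n", actual);
  }
  return 0;
}

This mirrors the intent of the patch: rather than failing when a full desired-size TLAB does not fit, the collector can size the last TLAB in a region down to the remaining space.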
 504 
 505 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
 506   // The second disjunct in the assertion below makes a concession
 507   // for the start-up verification done while the VM is being
 508   // created. Callers should be careful that they know that mutators
 509   // aren't going to interfere -- for instance, this is permissible
 510   // if we are still single-threaded and have either not yet
 511   // started allocating (nothing much to verify) or we have
 512   // started allocating but are now a full-fledged JavaThread
 513   // (and have thus made our TLABs available for filling).
 514   assert(SafepointSynchronize::is_at_safepoint() ||
 515          !is_init_completed(),
 516          "Should only be called at a safepoint or at start-up,"
 517          " otherwise concurrent mutator activity may make the heap"
 518          " unparsable again");
 519   const bool use_tlab = UseTLAB;
 520   // The main thread starts allocating via a TLAB even before it