
src/hotspot/share/gc/shared/collectedHeap.cpp

rev 50094 : [mq]: gclab.patch
rev 50095 : [mq]: allocations-rt.patch


 373   if (obj == NULL) {
 374     return NULL;
 375   }
 376 
 377   AllocTracer::send_allocation_in_new_tlab(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread);
 378 
 379   if (ZeroTLAB) {
 380     // ...and clear it.
 381     Copy::zero_to_words(obj, new_tlab_size);
 382   } else {
 383     // ...and zap the just-allocated object.
 384 #ifdef ASSERT
 385     // Skip mangling the space corresponding to the object header to
 386     // ensure that the returned space is not considered parsable by
 387     // any concurrent GC thread.
 388     size_t hdr_size = oopDesc::header_size();
 389     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 390 #endif // ASSERT
 391   }
 392   thread->tlab().fill(obj, obj + size, new_tlab_size);
 393   return Universe::heap()->tlab_post_allocation_setup(obj);
 394 }
 395 
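A quick standalone sketch of the zap step above (not HotSpot code; the constant and word type are stand-ins): in debug builds, when ZeroTLAB is off, the fresh TLAB is mangled with badHeapWordVal, but the first header_size words are deliberately left alone so a concurrent scanner never sees what looks like a parsable object header.

#include <cstddef>
#include <cstdint>

static const uintptr_t kBadHeapWord = 0xBAADBABEu; // stand-in for badHeapWordVal

// Mangle every word except the first hdr_words, mirroring the call
// Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal).
void zap_new_tlab(uintptr_t* buf, size_t total_words, size_t hdr_words) {
  for (size_t i = hdr_words; i < total_words; i++) {
    buf[i] = kBadHeapWord;
  }
}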
 396 size_t CollectedHeap::max_tlab_size() const {
 397   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
 398   // This restriction could be removed by enabling filling with multiple arrays.
 399   // If we compute that the reasonable way as
 400   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
 401   // we'll overflow on the multiply, so we do the divide first.
 402   // We actually lose a little by dividing first,
 403   // but that just makes the TLAB somewhat smaller than the biggest array,
 404   // which is fine, since we'll be able to fill that.
 405   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 406               sizeof(jint) *
 407               ((juint) max_jint / (size_t) HeapWordSize);
 408   return align_down(max_int_size, MinObjAlignment);
 409 }
 410 
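The overflow the comment warns about is easy to reproduce outside the VM. A minimal illustration with the arithmetic forced to 32 bits so the wraparound is visible (constants are stand-ins, assuming an 8-byte HeapWord):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t max_jint   = 0x7fffffff; // Integer.MAX_VALUE
  const uint32_t jint_bytes = 4;          // sizeof(jint)
  const uint32_t word_bytes = 8;          // stand-in for HeapWordSize

  uint32_t divide_first   = jint_bytes * (max_jint / word_bytes); // 1073741820 words
  uint32_t multiply_first = (jint_bytes * max_jint) / word_bytes; // wraps to 536870911

  printf("divide first:   %u\n", divide_first);
  printf("multiply first: %u\n", multiply_first);
  return 0;
}

Dividing first under-counts by at most a few words, which is exactly the small loss the comment accepts.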
 411 size_t CollectedHeap::filler_array_hdr_size() {
 412   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
 413 }
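The "align to Long" in filler_array_hdr_size rounds the int-array header up so the payload starts on a jlong boundary; with 8-byte HeapWords the round-up is a no-op, so it only matters where a word is 4 bytes. A standalone sketch of the round-up arithmetic (names are illustrative, not HotSpot's):

#include <cstddef>
#include <cstdio>

// Round 'offset' (in words) up to a multiple of 'alignment', a power of two.
size_t align_up_words(size_t offset, size_t alignment) {
  return (offset + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t words_per_jlong = 8 / 4;                 // on a 32-bit VM
  printf("%zu\n", align_up_words(3, words_per_jlong));  // a 3-word header pads to 4
  return 0;
}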


 450   DEBUG_ONLY(zap_filler_array(start, words, zap);)
 451 }
 452 
 453 void
 454 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
 455 {
 456   assert(words <= filler_array_max_size(), "too big for a single object");
 457 
 458   if (words >= filler_array_min_size()) {
 459     fill_with_array(start, words, zap);
 460   } else if (words > 0) {
 461     assert(words == min_fill_size(), "unaligned size");
 462     post_allocation_setup_common(SystemDictionary::Object_klass(), start);
 463   }
 464 }
 465 
 466 void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
 467 {
 468   DEBUG_ONLY(fill_args_check(start, words);)
 469   HandleMark hm;  // Free handles before leaving.
 470   fill_with_object_impl(start, words, zap);
 471 }
 472 
 473 void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
 474 {
 475   DEBUG_ONLY(fill_args_check(start, words);)
 476   HandleMark hm;  // Free handles before leaving.
 477 
 478   // Multiple objects may be required depending on the filler array maximum size. Fill
 479   // the range up to that with objects that are filler_array_max_size sized. The
 480   // remainder is filled with a single object.
 481   const size_t min = min_fill_size();
 482   const size_t max = filler_array_max_size();
 483   while (words > max) {
 484     const size_t cur = (words - max) >= min ? max : max - min;
 485     fill_with_array(start, cur, zap);
 486     start += cur;
 487     words -= cur;
 488   }
 489 
 490   fill_with_object_impl(start, words, zap);
 491 }
 492 
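The subtle part of the loop above is the step cur = (words - max) >= min ? max : max - min: if carving off a full max-sized filler array would leave a tail shorter than min_fill_size, the loop takes max - min instead so the tail stays large enough to fill. A standalone trace with made-up limits (HotSpot's real values are much larger):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t min = 2;   // stand-in for min_fill_size()
  const size_t max = 10;  // stand-in for filler_array_max_size()
  size_t words = 21;      // range to fill

  while (words > max) {
    // First pass: tail 21 - 10 = 11 >= 2, so take the full 10.
    // Second pass: tail 11 - 10 = 1 < 2, so take 10 - 2 = 8, leaving 3.
    size_t cur = (words - max) >= min ? max : max - min;
    printf("array filler: %zu words\n", cur);
    words -= cur;
  }
  printf("final filler: %zu words\n", words); // 3, still >= min
  return 0;
}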
 493 HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
 494   guarantee(false, "thread-local allocation buffers not supported");
 495   return NULL;
 496 }
 497 
 498 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
 499   // The second disjunct in the assertion below makes a concession
 500   // for the start-up verification done while the VM is being
 501   // created. Callers should take care that mutators
 502   // aren't going to interfere -- for instance, this is permissible
 503   // if we are still single-threaded and have either not yet
 504   // started allocating (nothing much to verify) or we have
 505   // started allocating but are now a full-fledged JavaThread
 506   // (and have thus made our TLABs available for filling).
 507   assert(SafepointSynchronize::is_at_safepoint() ||
 508          !is_init_completed(),
 509          "Should only be called at a safepoint or at start-up"
 510          " otherwise concurrent mutator activity may make heap "


 609   }
 610 }
 611 
 612 void CollectedHeap::reset_promotion_should_fail() {
 613   reset_promotion_should_fail(&_promotion_failure_alot_count);
 614 }
 615 
 616 #endif  // #ifndef PRODUCT
 617 
 618 bool CollectedHeap::supports_object_pinning() const {
 619   return false;
 620 }
 621 
 622 oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
 623   ShouldNotReachHere();
 624   return NULL;
 625 }
 626 
 627 void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
 628   ShouldNotReachHere();
 629 }
 630 
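The two pinning hooks above trap by design: they may only be reached after supports_object_pinning() has returned true, so a collector that offers pinning overrides all three together. A minimal standalone model of that contract (PinningHeap and its pin-count bookkeeping are invented for illustration):

#include <cassert>

struct Region { int pins; Region() : pins(0) {} }; // toy stand-in for a heap region

class Heap {
public:
  virtual ~Heap() {}
  virtual bool supports_object_pinning() const { return false; }
  virtual void pin(Region*)   { assert(false && "pinning not supported"); }
  virtual void unpin(Region*) { assert(false && "pinning not supported"); }
};

class PinningHeap : public Heap {
public:
  virtual bool supports_object_pinning() const { return true; }
  // While pins > 0 the region is excluded from evacuation, so a pinned
  // object keeps its address until it is unpinned.
  virtual void pin(Region* r)   { r->pins++; }
  virtual void unpin(Region* r) { r->pins--; }
};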
 631 HeapWord* CollectedHeap::tlab_post_allocation_setup(HeapWord* obj) {
 632   return obj;
 633 }
 634 
 635 uint CollectedHeap::oop_extra_words() {
 636   // Default implementation doesn't need extra space for oops.
 637   return 0;
 638 }
 639 
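The two defaults above (together with tlab_post_allocation_setup earlier in this version) let a collector reserve and initialize extra space in front of each object while costing nothing in the common case. A standalone model of a collector that keeps one extra word per object, e.g. for a forwarding pointer (ModelHeap, FwdHeap, and the layout are invented; this is a sketch, not any real collector's design):

#include <cstddef>
#include <cstdint>

class ModelHeap {
public:
  virtual ~ModelHeap() {}
  virtual size_t oop_extra_words() { return 0; }  // default: no extra space
  virtual uintptr_t* tlab_post_allocation_setup(uintptr_t* mem) { return mem; }
};

class FwdHeap : public ModelHeap {
public:
  virtual size_t oop_extra_words() { return 1; }  // one word ahead of each oop
  virtual uintptr_t* tlab_post_allocation_setup(uintptr_t* mem) {
    mem[0] = (uintptr_t)(mem + 1); // extra word initially points at the object
    return mem + 1;                // the object proper starts one word in
  }
};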
 640 #ifndef CC_INTERP
 641 void CollectedHeap::compile_prepare_oop(MacroAssembler* masm, Register obj) {
 642   // Default implementation does nothing.
 643 }
 644 #endif


 373   if (obj == NULL) {
 374     return NULL;
 375   }
 376 
 377   AllocTracer::send_allocation_in_new_tlab(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread);
 378 
 379   if (ZeroTLAB) {
 380     // ...and clear it.
 381     Copy::zero_to_words(obj, new_tlab_size);
 382   } else {
 383     // ...and zap the just-allocated object.
 384 #ifdef ASSERT
 385     // Skip mangling the space corresponding to the object header to
 386     // ensure that the returned space is not considered parsable by
 387     // any concurrent GC thread.
 388     size_t hdr_size = oopDesc::header_size();
 389     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 390 #endif // ASSERT
 391   }
 392   thread->tlab().fill(obj, obj + size, new_tlab_size);
 393   return obj;
 394 }
 395 
 396 size_t CollectedHeap::max_tlab_size() const {
 397   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
 398   // This restriction could be removed by enabling filling with multiple arrays.
 399   // If we compute that the reasonable way as
 400   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
 401   // we'll overflow on the multiply, so we do the divide first.
 402   // We actually lose a little by dividing first,
 403   // but that just makes the TLAB somewhat smaller than the biggest array,
 404   // which is fine, since we'll be able to fill that.
 405   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 406               sizeof(jint) *
 407               ((juint) max_jint / (size_t) HeapWordSize);
 408   return align_down(max_int_size, MinObjAlignment);
 409 }
 410 
 411 size_t CollectedHeap::filler_array_hdr_size() {
 412   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
 413 }


 450   DEBUG_ONLY(zap_filler_array(start, words, zap);)
 451 }
 452 
 453 void
 454 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
 455 {
 456   assert(words <= filler_array_max_size(), "too big for a single object");
 457 
 458   if (words >= filler_array_min_size()) {
 459     fill_with_array(start, words, zap);
 460   } else if (words > 0) {
 461     assert(words == min_fill_size(), "unaligned size");
 462     post_allocation_setup_common(SystemDictionary::Object_klass(), start);
 463   }
 464 }
 465 
 466 void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
 467 {
 468   DEBUG_ONLY(fill_args_check(start, words);)
 469   HandleMark hm;  // Free handles before leaving.
 470   Universe::heap()->fill_with_object_impl(start, words, zap);
 471 }
 472 
 473 void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
 474 {
 475   DEBUG_ONLY(fill_args_check(start, words);)
 476   HandleMark hm;  // Free handles before leaving.
 477 
 478   // Multiple objects may be required depending on the filler array maximum size. Fill
 479   // the range up to that with objects that are filler_array_max_size sized. The
 480   // remainder is filled with a single object.
 481   const size_t min = min_fill_size();
 482   const size_t max = filler_array_max_size();
 483   while (words > max) {
 484     const size_t cur = (words - max) >= min ? max : max - min;
 485     fill_with_array(start, cur, zap);
 486     start += cur;
 487     words -= cur;
 488   }
 489 
 490   Universe::heap()->fill_with_object_impl(start, words, zap);
 491 }
 492 
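The visible change in this hunk is that fill_with_object_impl is now reached through Universe::heap() rather than called directly, so the concrete heap can override how filler objects are set up even though the outer fill helpers are shared. A minimal standalone model of why the indirection matters (class and function names are illustrative only):

#include <cstdio>

class BaseHeap {
public:
  virtual ~BaseHeap() {}
  virtual void fill_impl() { printf("shared filler setup\n"); }
  static BaseHeap* heap();                    // stand-in for Universe::heap()
  static void fill() { heap()->fill_impl(); } // virtual dispatch, as in the new code
};

class CustomHeap : public BaseHeap {
public:
  virtual void fill_impl() { printf("collector-specific filler setup\n"); }
};

static CustomHeap g_heap;
BaseHeap* BaseHeap::heap() { return &g_heap; }

int main() {
  BaseHeap::fill(); // picks the CustomHeap override
  return 0;
}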
 493 HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
 494   guarantee(false, "thread-local allocation buffers not supported");
 495   return NULL;
 496 }
 497 
 498 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
 499   // The second disjunct in the assertion below makes a concession
 500   // for the start-up verification done while the VM is being
 501   // created. Callers should take care that mutators
 502   // aren't going to interfere -- for instance, this is permissible
 503   // if we are still single-threaded and have either not yet
 504   // started allocating (nothing much to verify) or we have
 505   // started allocating but are now a full-fledged JavaThread
 506   // (and have thus made our TLABs available for filling).
 507   assert(SafepointSynchronize::is_at_safepoint() ||
 508          !is_init_completed(),
 509          "Should only be called at a safepoint or at start-up"
 510          " otherwise concurrent mutator activity may make heap "


 609   }
 610 }
 611 
 612 void CollectedHeap::reset_promotion_should_fail() {
 613   reset_promotion_should_fail(&_promotion_failure_alot_count);
 614 }
 615 
 616 #endif  // #ifndef PRODUCT
 617 
 618 bool CollectedHeap::supports_object_pinning() const {
 619   return false;
 620 }
 621 
 622 oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
 623   ShouldNotReachHere();
 624   return NULL;
 625 }
 626 
 627 void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
 628   ShouldNotReachHere();
 629 }
 630 
 631 uint CollectedHeap::oop_extra_words() {
 632   // Default implementation doesn't need extra space for oops.
 633   return 0;
 634 }
 635 
 636 #ifndef CC_INTERP
 637 void CollectedHeap::compile_prepare_oop(MacroAssembler* masm, Register obj) {
 638   // Default implementation does nothing.
 639 }
 640 #endif