< prev index next >

src/hotspot/share/gc/shared/collectedHeap.cpp

Print this page
rev 50392 : JEP 331


 349 #ifdef ASSERT
// Debug-only sanity check performed before a heap allocation: the current
// thread must have no pending exception, and (under StrictSafepointChecks)
// must be allowed to allocate and be in a state where a safepoint is legal.
 350 void CollectedHeap::check_for_valid_allocation_state() {
 351   Thread *thread = Thread::current();
 352   // How to choose between a pending exception and a potential
 353   // OutOfMemoryError?  Don't allow pending exceptions.
 354   // This is a VM policy failure, so how do we exhaustively test it?
 355   assert(!thread->has_pending_exception(),
 356          "shouldn't be allocating with pending exception");
 357   if (StrictSafepointChecks) {
 358     assert(thread->allow_allocation(),
 359            "Allocation done by thread for which allocation is blocked "
 360            "by No_Allocation_Verifier!");
 361     // Allocation of an oop can always invoke a safepoint,
 362     // hence, the true argument
 363     thread->check_for_valid_safepoint_state(true);
 364   }
 365 }
 366 #endif
 367 
// Slow path taken when the fast-path bump-pointer allocation in the thread's
// TLAB failed. Either keeps the current TLAB (recording the miss) and returns
// NULL so the caller allocates the object in the shared heap, or retires the
// TLAB, installs a freshly allocated one, and returns the object carved out
// of its start. 'size' is in HeapWords (obj + size below is HeapWord*
// arithmetic). Returns NULL whenever the caller must allocate outside a TLAB.
 368 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {




















 369 
 370   // Retain tlab and allocate object in shared space if
 371   // the amount free in the tlab is too large to discard.
 372   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 373     thread->tlab().record_slow_allocation(size);
 374     return NULL;
 375   }
 376 
 377   // Discard tlab and allocate a new one.
 378   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 379   size_t new_tlab_size = thread->tlab().compute_size(size);
 380 
     // Note: the old TLAB is retired here even when compute_size() returned 0
     // and no replacement will be allocated below.
 381   thread->tlab().clear_before_allocation();
 382 
 383   if (new_tlab_size == 0) {
 384     return NULL;
 385   }
 386 
 387   // Allocate a new TLAB requesting new_tlab_size. Any size
 388   // between minimal and new_tlab_size is accepted.
 389   size_t actual_tlab_size = 0;
 390   size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(size);
 391   HeapWord* obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);
 392   if (obj == NULL) {
     // allocate_new_tlab() reports the granted size through actual_tlab_size;
     // on failure it must not have touched it.
 393     assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
 394            min_tlab_size, new_tlab_size, actual_tlab_size);
 395     return NULL;
 396   }
 397   assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
 398          p2i(obj), min_tlab_size, new_tlab_size);
 399 
     // Event/tracing notification for the new TLAB (sizes converted to bytes).
 400   AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, thread);
 401 
 402   if (ZeroTLAB) {
 403     // ..and clear it.
 404     Copy::zero_to_words(obj, actual_tlab_size);
 405   } else {
 406     // ...and zap just allocated object.
 407 #ifdef ASSERT
 408     // Skip mangling the space corresponding to the object header to
 409     // ensure that the returned space is not considered parsable by
 410     // any concurrent GC thread.
 411     size_t hdr_size = oopDesc::header_size();
 412     Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
 413 #endif // ASSERT
 414   }








     // Install the new TLAB for this thread and carve the requested object
     // (the first 'size' words) out of its start.
 415   thread->tlab().fill(obj, obj + size, actual_tlab_size);
 416   return obj;
 417 }
 418 
 419 size_t CollectedHeap::max_tlab_size() const {
 420   // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
 421   // This restriction could be removed by enabling filling with multiple arrays.
 422   // If we compute that the reasonable way as
 423   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
 424   // we'll overflow on the multiply, so we do the divide first.
 425   // We actually lose a little by dividing first,
 426   // but that just makes the TLAB  somewhat smaller than the biggest array,
 427   // which is fine, since we'll be able to fill that.
 428   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 429               sizeof(jint) *
 430               ((juint) max_jint / (size_t) HeapWordSize);
 431   return align_down(max_int_size, MinObjAlignment);
 432 }
 433 
 434 size_t CollectedHeap::filler_array_hdr_size() {




 349 #ifdef ASSERT
 350 void CollectedHeap::check_for_valid_allocation_state() {
 351   Thread *thread = Thread::current();
 352   // How to choose between a pending exception and a potential
 353   // OutOfMemoryError?  Don't allow pending exceptions.
 354   // This is a VM policy failure, so how do we exhaustively test it?
 355   assert(!thread->has_pending_exception(),
 356          "shouldn't be allocating with pending exception");
 357   if (StrictSafepointChecks) {
 358     assert(thread->allow_allocation(),
 359            "Allocation done by thread for which allocation is blocked "
 360            "by No_Allocation_Verifier!");
 361     // Allocation of an oop can always invoke a safepoint,
 362     // hence, the true argument
 363     thread->check_for_valid_safepoint_state(true);
 364   }
 365 }
 366 #endif
 367 
// Slow path taken when the fast-path bump-pointer allocation in the thread's
// TLAB failed. With JEP 331 heap sampling enabled, first tries to satisfy the
// request from the remaining TLAB space past a sample point; otherwise either
// keeps the TLAB (recording the miss, returning NULL so the caller allocates
// in the shared heap) or retires it and installs a freshly allocated one,
// returning the object carved out of its start. 'size' is in HeapWords.
 368 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 369   HeapWord* obj = NULL;
 370 
 371   // In assertion mode, check that there was a sampling collector present
 372   // in the stack. This enforces checking that no path is without a sampling
 373   // collector.
 374   // Only check if the sampler could actually sample something in this call path.
 375   assert(!JvmtiExport::should_post_sampled_object_alloc()
 376          || !JvmtiSampledObjectAllocEventCollector::object_alloc_is_safe_to_sample()
 377          || thread->heap_sampler().sampling_collector_present(),
 378          "Sampling collector not present.");
 379 
 380   if (ThreadHeapSampler::enabled()) {
 381     // Try to allocate the sampled object from TLAB, it is possible a sample
 382     // point was put and the TLAB still has space.
 383     obj = thread->tlab().allocate_sampled_object(size);
 384 
 385     if (obj != NULL) {
 386       return obj;
 387     }
 388   }
 389 
 390   // Retain tlab and allocate object in shared space if
 391   // the amount free in the tlab is too large to discard.
 392   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 393     thread->tlab().record_slow_allocation(size);
 394     return NULL;
 395   }
 396 
 397   // Discard tlab and allocate a new one.
 398   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 399   size_t new_tlab_size = thread->tlab().compute_size(size);
 400 
     // Note: the old TLAB is retired here even when compute_size() returned 0
     // and no replacement will be allocated below.
 401   thread->tlab().clear_before_allocation();
 402 
 403   if (new_tlab_size == 0) {
 404     return NULL;
 405   }
 406 
 407   // Allocate a new TLAB requesting new_tlab_size. Any size
 408   // between minimal and new_tlab_size is accepted.
 409   size_t actual_tlab_size = 0;
 410   size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(size);
 411   obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);
 412   if (obj == NULL) {
     // allocate_new_tlab() reports the granted size through actual_tlab_size;
     // on failure it must not have touched it.
 413     assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
 414            min_tlab_size, new_tlab_size, actual_tlab_size);
 415     return NULL;
 416   }
 417   assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
 418          p2i(obj), min_tlab_size, new_tlab_size);
 419 
     // Event/tracing notification for the new TLAB (sizes converted to bytes).
 420   AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, thread);
 421 
 422   if (ZeroTLAB) {
 423     // ..and clear it.
 424     Copy::zero_to_words(obj, actual_tlab_size);
 425   } else {
 426     // ...and zap just allocated object.
 427 #ifdef ASSERT
 428     // Skip mangling the space corresponding to the object header to
 429     // ensure that the returned space is not considered parsable by
 430     // any concurrent GC thread.
 431     size_t hdr_size = oopDesc::header_size();
 432     Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
 433 #endif // ASSERT
 434   }
 435 
 436   // Send the thread information about this allocation in case a sample is
 437   // requested.
 438   if (ThreadHeapSampler::enabled()) {
     // Note the ordering: the bytes-since-last-sample figure is read from the
     // outgoing TLAB here, before fill() below installs the new one.
 439     size_t tlab_bytes_since_last_sample = thread->tlab().bytes_since_last_sample_point();
 440     thread->heap_sampler().check_for_sampling(obj, size, tlab_bytes_since_last_sample);
 441   }
 442 
     // Install the new TLAB for this thread and carve the requested object
     // (the first 'size' words) out of its start.
 443   thread->tlab().fill(obj, obj + size, actual_tlab_size);
 444   return obj;
 445 }
 446 
 447 size_t CollectedHeap::max_tlab_size() const {
 448   // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
 449   // This restriction could be removed by enabling filling with multiple arrays.
 450   // If we compute that the reasonable way as
 451   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
 452   // we'll overflow on the multiply, so we do the divide first.
 453   // We actually lose a little by dividing first,
 454   // but that just makes the TLAB  somewhat smaller than the biggest array,
 455   // which is fine, since we'll be able to fill that.
 456   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 457               sizeof(jint) *
 458               ((juint) max_jint / (size_t) HeapWordSize);
 459   return align_down(max_int_size, MinObjAlignment);
 460 }
 461 
 462 size_t CollectedHeap::filler_array_hdr_size() {


< prev index next >