src/share/vm/gc/shared/collectedHeap.cpp

 357     assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
 358     bs->write_region(deferred);
 359     // "Clear" the deferred_card_mark field
 360     thread->set_deferred_card_mark(MemRegion());
 361   }
 362   assert(thread->deferred_card_mark().is_empty(), "invariant");
 363 }
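
For readers outside HotSpot, the deferred card-mark pattern that the function above completes can be sketched in isolation. This is a toy model with stand-in types, not HotSpot's real MemRegion, BarrierSet, or Thread classes:

#include <cstddef>

// Stand-in types (assumptions for this sketch, not HotSpot's classes).
struct MemRegion {
  void*  _start     = nullptr;
  size_t _word_size = 0;
  bool is_empty() const { return _word_size == 0; }
};

struct BarrierSet {
  // Dirty every card covering the region (card-table details elided).
  void write_region(MemRegion mr) { /* mark cards for mr */ }
};

struct Thread {
  MemRegion _deferred;
  MemRegion deferred_card_mark() const      { return _deferred; }
  void set_deferred_card_mark(MemRegion mr) { _deferred = mr; }
};

// A card mark deferred at allocation time is flushed later: mark the
// cards now, then reset the per-thread slot to an empty region so the
// closing assert (deferred_card_mark().is_empty()) holds.
void flush_deferred_card_mark(Thread* thread, BarrierSet* bs) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    bs->write_region(deferred);
    thread->set_deferred_card_mark(MemRegion());
  }
}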
 364 
 365 size_t CollectedHeap::max_tlab_size() const {
 366   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
 367   // This restriction could be removed by enabling filling with multiple arrays.
 368   // If we compute that the straightforward way, as
 369   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
 370   // we'll overflow on the multiply, so we do the divide first.
 371   // We actually lose a little by dividing first,
 372   // but that just makes the TLAB somewhat smaller than the biggest array,
 373   // which is fine, since we'll be able to fill that.
 374   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 375               sizeof(jint) *
 376               ((juint) max_jint / (size_t) HeapWordSize);
 377   return align_size_down(max_int_size, MinObjAlignment);
 378 }
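
To make the overflow argument concrete, here is a small self-contained check of the two evaluation orders. The 64-bit arithmetic, the 8-byte heap word, and the printed loss are illustration-only assumptions, not values taken from a particular VM build:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t max_jint  = 0x7fffffff; // Integer.MAX_VALUE
  const uint64_t jint_size = 4;          // sizeof(jint)
  const uint64_t heap_word = 8;          // assumed 64-bit HeapWordSize

  // The "straightforward" order, done in 64 bits so it cannot wrap here;
  // as a 32-bit size_t computation, jint_size * max_jint would overflow.
  uint64_t exact = (jint_size * max_jint) / heap_word;   // 1073741823 words

  // Divide first, as max_tlab_size() does: every intermediate fits.
  uint64_t safe  = jint_size * (max_jint / heap_word);   // 1073741820 words

  // The loss from truncating the divide is tiny (3 words here).
  std::printf("exact=%llu safe=%llu loss=%llu\n",
              (unsigned long long)exact, (unsigned long long)safe,
              (unsigned long long)(exact - safe));
  return 0;
}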
 379 
 380 // Helper for ReduceInitialCardMarks. For performance,
 381 // compiled code may elide card-marks for initializing stores
 382 // to a newly allocated object along the fast-path. We
 383 // compensate for such elided card-marks as follows:
 384 // (a) Generational, non-concurrent collectors, such as
 385 //     GenCollectedHeap(ParNew,DefNew,Tenured) and
 386 //     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
 387 //     need the card-mark if and only if the region is
 388 //     in the old gen, and do not care if the card-mark
 389 //     succeeds or precedes the initializing stores themselves,
 390 //     so long as the card-mark is completed before the next
 391 //     scavenge. For all these cases, we can do a card mark
 392 //     at the point at which we do a slow path allocation
 393 //     in the old gen, i.e. in this call.
 394 // (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
 395 //     in addition that the card-mark for an old gen allocated
 396 //     object strictly follow any associated initializing stores.
 397 //     In these cases, the memRegion remembered below is




 357     assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
 358     bs->write_region(deferred);
 359     // "Clear" the deferred_card_mark field
 360     thread->set_deferred_card_mark(MemRegion());
 361   }
 362   assert(thread->deferred_card_mark().is_empty(), "invariant");
 363 }
 364 
 365 size_t CollectedHeap::max_tlab_size() const {
 366   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
 367   // This restriction could be removed by enabling filling with multiple arrays.
 368   // If we compute that the straightforward way, as
 369   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
 370   // we'll overflow on the multiply, so we do the divide first.
 371   // We actually lose a little by dividing first,
 372   // but that just makes the TLAB somewhat smaller than the biggest array,
 373   // which is fine, since we'll be able to fill that.
 374   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 375               sizeof(jint) *
 376               ((juint) max_jint / (size_t) HeapWordSize);
 377   return align_down(max_int_size, MinObjAlignment);
 378 }
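
The final rounding (align_size_down in the old frame, renamed align_down in this change) drops the size to an object-alignment boundary. A minimal sketch of that rounding, assuming a power-of-two alignment as MinObjAlignment is; the helper name is hypothetical, not HotSpot's implementation:

#include <cassert>
#include <cstddef>

// Hypothetical stand-in for HotSpot's align_down, assuming the alignment
// is a power of two (ObjectAlignmentInBytes is checked for this at VM
// startup).
inline size_t align_down_sketch(size_t value, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment");
  return value & ~(alignment - 1);
}

int main() {
  // With a 2-word object alignment, an odd word count rounds down.
  assert(align_down_sketch(1073741823, 2) == 1073741822);
  return 0;
}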
 379 
 380 // Helper for ReduceInitialCardMarks. For performance,
 381 // compiled code may elide card-marks for initializing stores
 382 // to a newly allocated object along the fast-path. We
 383 // compensate for such elided card-marks as follows:
 384 // (a) Generational, non-concurrent collectors, such as
 385 //     GenCollectedHeap(ParNew,DefNew,Tenured) and
 386 //     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
 387 //     need the card-mark if and only if the region is
 388 //     in the old gen, and do not care if the card-mark
 389 //     succeeds or precedes the initializing stores themselves,
 390 //     so long as the card-mark is completed before the next
 391 //     scavenge. For all these cases, we can do a card mark
 392 //     at the point at which we do a slow path allocation
 393 //     in the old gen, i.e. in this call.
 394 // (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
 395 //     in addition that the card-mark for an old gen allocated
 396 //     object strictly follow any associated initializing stores.
 397 //     In these cases, the memRegion remembered below is

