
src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp

rev 49945 : imported patch 8191471-g1-varying-tlab-allocation
rev 49946 : imported patch 8191471-g1-retained-mutator-region
rev 49949 : imported patch 8191471-tschatzl-comments-open
rev 49950 : [mq]: 8191471-pliden-comments

Old version (before the patch):

 461   return result;
 462 }
 463 
 464 void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
 465   CollectedHeap::ensure_parsability(retire_tlabs);
 466   young_gen()->eden_space()->ensure_parsability();
 467 }
 468 
 469 size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
 470   return young_gen()->eden_space()->tlab_capacity(thr);
 471 }
 472 
 473 size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
 474   return young_gen()->eden_space()->tlab_used(thr);
 475 }
 476 
 477 size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
 478   return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
 479 }
 480 
 481 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
 482   return young_gen()->allocate(size);
 483 }
 484 
 485 void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
 486   CollectedHeap::accumulate_statistics_all_tlabs();
 487 }
 488 
 489 void ParallelScavengeHeap::resize_all_tlabs() {
 490   CollectedHeap::resize_all_tlabs();
 491 }
 492 
 493 // This method is used by System.gc() and JVMTI.
 494 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
 495   assert(!Heap_lock->owned_by_self(),
 496     "this thread should not own the Heap_lock");
 497 
 498   uint gc_count      = 0;
 499   uint full_gc_count = 0;
 500   {
 501     MutexLocker ml(Heap_lock);
 502     // This value is guarded by the Heap_lock

New version (after the patch):

 461   return result;
 462 }
 463 
 464 void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
 465   CollectedHeap::ensure_parsability(retire_tlabs);
 466   young_gen()->eden_space()->ensure_parsability();
 467 }
 468 
 469 size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
 470   return young_gen()->eden_space()->tlab_capacity(thr);
 471 }
 472 
 473 size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
 474   return young_gen()->eden_space()->tlab_used(thr);
 475 }
 476 
 477 size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
 478   return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
 479 }
 480 
 481 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
 482   HeapWord* result = young_gen()->allocate(requested_size);
 483   if (result != NULL) {
 484     *actual_size = requested_size;
 485   }
 486 
 487   return result;
 488 }
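
The change above is the core of this hunk: allocate_new_tlab() now takes a minimum and a requested size and reports the size actually granted through actual_size (ParallelScavengeHeap grants exactly requested_size whenever the allocation succeeds). A minimal caller-side sketch of the new contract follows; the helper name and the bounds check are illustrative assumptions, not code from this webrev:

  // Illustrative sketch only: request a TLAB of requested_size words, but
  // accept anything down to min_size; the heap reports the granted size.
  static HeapWord* request_tlab_sketch(CollectedHeap* heap,
                                       size_t min_size,
                                       size_t requested_size) {
    size_t actual_size = 0;
    HeapWord* mem = heap->allocate_new_tlab(min_size, requested_size, &actual_size);
    if (mem == NULL) {
      return NULL;        // no space; caller falls back to outside-TLAB allocation
    }
    assert(actual_size >= min_size && actual_size <= requested_size,
           "granted TLAB size should be within the requested bounds");
    // The caller would install [mem, mem + actual_size) as the thread's new TLAB.
    return mem;
  }

The assert encodes the intended contract of the new interface; ParallelScavengeHeap satisfies it trivially by always granting requested_size.
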
 489 
 490 void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
 491   CollectedHeap::accumulate_statistics_all_tlabs();
 492 }
 493 
 494 void ParallelScavengeHeap::resize_all_tlabs() {
 495   CollectedHeap::resize_all_tlabs();
 496 }
 497 
 498 // This method is used by System.gc() and JVMTI.
 499 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
 500   assert(!Heap_lock->owned_by_self(),
 501     "this thread should not own the Heap_lock");
 502 
 503   uint gc_count      = 0;
 504   uint full_gc_count = 0;
 505   {
 506     MutexLocker ml(Heap_lock);
 507     // This value is guarded by the Heap_lock
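
The listing is cut off here by the diff context. For orientation, the counts captured under Heap_lock are typically handed to a VM operation so the safepoint code can detect whether another collection already satisfied the System.gc()/JVMTI request. The sketch below shows that pattern and assumes the existing VM_ParallelGCSystemGC operation; it is not the verbatim remainder of this method:

  // Sketch of the usual count-handshake pattern (not the verbatim file):
  {
    MutexLocker ml(Heap_lock);
    gc_count      = total_collections();        // guarded by Heap_lock
    full_gc_count = total_full_collections();
  }
  // The VM operation re-checks the captured counts at the safepoint and
  // skips the collection if another GC has already run in the meantime.
  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
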

