index

src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp

Print this page
rev 7780 : imported patch 8072621
   1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 243 // simultaneously. When that happens, only one VM operation will succeed,
 244 // and the rest will not be executed. For that reason, this method loops
 245 // during failed allocation attempts. If the java heap becomes exhausted,
 246 // we rely on the size_policy object to force a bail out.
 247 HeapWord* ParallelScavengeHeap::mem_allocate(
 248                                      size_t size,
 249                                      bool* gc_overhead_limit_was_exceeded) {
 250   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
 251   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
 252   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 253 
 254   // In general gc_overhead_limit_was_exceeded should be false so
 255   // set it so here and reset it to true only if the gc time
 256   // limit is being exceeded as checked below.
 257   *gc_overhead_limit_was_exceeded = false;
 258 
 259   HeapWord* result = young_gen()->allocate(size);
 260 
 261   uint loop_count = 0;
 262   uint gc_count = 0;
 263   int gclocker_stalled_count = 0;
 264 
 265   while (result == NULL) {
 266     // We don't want to have multiple collections for a single filled generation.
 267     // To prevent this, each thread tracks the total_collections() value, and if
 268     // the count has changed, does not do a new collection.
 269     //
 270     // The collection count must be read only while holding the heap lock. VM
 271     // operations also hold the heap lock during collections. There is a lock
 272     // contention case where thread A blocks waiting on the Heap_lock, while
 273     // thread B is holding it doing a collection. When thread A gets the lock,
 274     // the collection count has already changed. To prevent duplicate collections,
 275     // The policy MUST attempt allocations during the same period it reads the
 276     // total_collections() value!
 277     {
 278       MutexLocker ml(Heap_lock);
 279       gc_count = Universe::heap()->total_collections();
 280 
 281       result = young_gen()->allocate(size);
 282       if (result != NULL) {
 283         return result;


// Accumulate TLAB statistics; no ParallelScavenge-specific work — simply
// forwards to the shared CollectedHeap implementation.
void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}
 506 
// Resize all TLABs; no ParallelScavenge-specific work — simply forwards
// to the shared CollectedHeap implementation.
void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}
 510 
// Returns true when the store barrier for initializing stores to new_obj
// may be elided, which for this heap is exactly when new_obj lives in the
// young generation.
bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}
 517 
 518 // This method is used by System.gc() and JVMTI.
 519 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
 520   assert(!Heap_lock->owned_by_self(),
 521     "this thread should not own the Heap_lock");
 522 
 523   unsigned int gc_count      = 0;
 524   unsigned int full_gc_count = 0;
 525   {
 526     MutexLocker ml(Heap_lock);
 527     // This value is guarded by the Heap_lock
 528     gc_count      = Universe::heap()->total_collections();
 529     full_gc_count = Universe::heap()->total_full_collections();
 530   }
 531 
 532   VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
 533   VMThread::execute(&op);
 534 }
 535 
// Heap-wide oop iteration is not implemented for ParallelScavengeHeap;
// any caller reaching this path trips the Unimplemented() guard.
void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
  Unimplemented();
}
 539 
// Apply the closure to every object in the heap: the young generation
// first, then the old generation.
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}
 544 


   1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 243 // simultaneously. When that happens, only one VM operation will succeed,
 244 // and the rest will not be executed. For that reason, this method loops
 245 // during failed allocation attempts. If the java heap becomes exhausted,
 246 // we rely on the size_policy object to force a bail out.
 247 HeapWord* ParallelScavengeHeap::mem_allocate(
 248                                      size_t size,
 249                                      bool* gc_overhead_limit_was_exceeded) {
 250   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
 251   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
 252   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 253 
 254   // In general gc_overhead_limit_was_exceeded should be false so
 255   // set it so here and reset it to true only if the gc time
 256   // limit is being exceeded as checked below.
 257   *gc_overhead_limit_was_exceeded = false;
 258 
 259   HeapWord* result = young_gen()->allocate(size);
 260 
 261   uint loop_count = 0;
 262   uint gc_count = 0;
 263   uint gclocker_stalled_count = 0;
 264 
 265   while (result == NULL) {
 266     // We don't want to have multiple collections for a single filled generation.
 267     // To prevent this, each thread tracks the total_collections() value, and if
 268     // the count has changed, does not do a new collection.
 269     //
 270     // The collection count must be read only while holding the heap lock. VM
 271     // operations also hold the heap lock during collections. There is a lock
 272     // contention case where thread A blocks waiting on the Heap_lock, while
 273     // thread B is holding it doing a collection. When thread A gets the lock,
 274     // the collection count has already changed. To prevent duplicate collections,
 275     // The policy MUST attempt allocations during the same period it reads the
 276     // total_collections() value!
 277     {
 278       MutexLocker ml(Heap_lock);
 279       gc_count = Universe::heap()->total_collections();
 280 
 281       result = young_gen()->allocate(size);
 282       if (result != NULL) {
 283         return result;


// Accumulate TLAB statistics; no ParallelScavenge-specific work — simply
// forwards to the shared CollectedHeap implementation.
void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}
 506 
// Resize all TLABs; no ParallelScavenge-specific work — simply forwards
// to the shared CollectedHeap implementation.
void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}
 510 
// Returns true when the store barrier for initializing stores to new_obj
// may be elided, which for this heap is exactly when new_obj lives in the
// young generation.
bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}
 517 
// This method is used by System.gc() and JVMTI.
//
// Reads the current collection counts under the Heap_lock, then submits a
// VM_ParallelGCSystemGC operation to the VM thread. The captured counts let
// the operation detect that another collection already ran in the meantime
// (see the duplicate-collection discussion above mem_allocate).
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  // The counts below are read under the Heap_lock, so the caller must not
  // already hold it.
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}
 535 
// Heap-wide oop iteration is not implemented for ParallelScavengeHeap;
// any caller reaching this path trips the Unimplemented() guard.
void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
  Unimplemented();
}
 539 
// Apply the closure to every object in the heap: the young generation
// first, then the old generation.
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}
 544 


index