
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp

 499 }
 500 
 501 // This method is used by System.gc() and JVMTI.
 502 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
 503   assert(!Heap_lock->owned_by_self(),
 504     "this thread should not own the Heap_lock");
 505 
 506   uint gc_count      = 0;
 507   uint full_gc_count = 0;
 508   {
 509     MutexLocker ml(Heap_lock);
 510     // These values are guarded by the Heap_lock
 511     gc_count      = Universe::heap()->total_collections();
 512     full_gc_count = Universe::heap()->total_full_collections();
 513   }
 514 
 515   VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
 516   VMThread::execute(&op);
 517 }
 518 
 519 void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
 520   Unimplemented();
 521 }
 522 
 523 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
 524   young_gen()->object_iterate(cl);
 525   old_gen()->object_iterate(cl);
 526 }
 527 
 528 
 529 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
 530   if (young_gen()->is_in_reserved(addr)) {
 531     assert(young_gen()->is_in(addr),
 532            "addr should be in allocated part of young gen");
 533     // called from os::print_location by find or VMError
 534     if (Debugging || VMError::fatal_error_in_progress())  return NULL;
 535     Unimplemented();
 536   } else if (old_gen()->is_in_reserved(addr)) {
 537     assert(old_gen()->is_in(addr),
 538            "addr should be in allocated part of old gen");
 539     return old_gen()->start_array()->object_start((HeapWord*)addr);
 540   }
 541   return 0;
 542 }
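
As the comment above collect() notes, this method is the entry point reached from System.gc() and from JVMTI-forced collections. For context, here is a sketch of the System.gc() path, simplified from the JVM_GC entry point in jvm.cpp and quoted from memory, so the exact guard and wrapper macros may differ by release:

// Sketch only (not part of this change): simplified System.gc() entry point.
// With -XX:+UseParallelGC, Universe::heap() is the ParallelScavengeHeap,
// so the call below lands in ParallelScavengeHeap::collect() shown above.
JVM_ENTRY_NO_ENV(void, JVM_GC(void))
  JVMWrapper("JVM_GC");
  if (!DisableExplicitGC) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);
  }
JVM_END

The gc_count/full_gc_count pair read under the Heap_lock is passed to VM_ParallelGCSystemGC so the operation can detect, once it reaches its safepoint, whether another collection already ran in the meantime and skip a redundant full GC.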




 499 }
 500 
 501 // This method is used by System.gc() and JVMTI.
 502 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
 503   assert(!Heap_lock->owned_by_self(),
 504     "this thread should not own the Heap_lock");
 505 
 506   uint gc_count      = 0;
 507   uint full_gc_count = 0;
 508   {
 509     MutexLocker ml(Heap_lock);
 510     // These values are guarded by the Heap_lock
 511     gc_count      = Universe::heap()->total_collections();
 512     full_gc_count = Universe::heap()->total_full_collections();
 513   }
 514 
 515   VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
 516   VMThread::execute(&op);
 517 }
 518 




 519 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
 520   young_gen()->object_iterate(cl);
 521   old_gen()->object_iterate(cl);
 522 }
 523 
 524 
 525 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
 526   if (young_gen()->is_in_reserved(addr)) {
 527     assert(young_gen()->is_in(addr),
 528            "addr should be in allocated part of young gen");
 529     // called from os::print_location by find or VMError
 530     if (Debugging || VMError::fatal_error_in_progress())  return NULL;
 531     Unimplemented();
 532   } else if (old_gen()->is_in_reserved(addr)) {
 533     assert(old_gen()->is_in(addr),
 534            "addr should be in allocated part of old gen");
 535     return old_gen()->start_array()->object_start((HeapWord*)addr);
 536   }
 537   return 0;
 538 }
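
The two listings differ only in the oop_iterate(ExtendedOopClosure*) override, which is unimplemented (Unimplemented()) for this collector; code that needs to walk the parallel-scavenge heap uses object_iterate() with an ObjectClosure instead. A minimal sketch of such a closure, assuming only the standard ObjectClosure interface from memory/iterator.hpp (the ObjectCountClosure name is invented for illustration):

// Sketch only: a hypothetical closure that counts the objects visited.
// ObjectClosure::do_object(oop) is the standard interface; this class is
// illustrative and not part of this change.
class ObjectCountClosure : public ObjectClosure {
  size_t _count;
 public:
  ObjectCountClosure() : _count(0) {}
  virtual void do_object(oop obj) { _count++; }
  size_t count() const { return _count; }
};

// Typically run inside a safepoint VM operation:
//   ObjectCountClosure cl;
//   Universe::heap()->object_iterate(&cl);

As object_iterate() above shows, the walk simply visits the young generation and then the old generation.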