< prev index next >

src/hotspot/share/gc/shared/genCollectedHeap.cpp

Print this page




 709                                                  GenCollectedHeap::GenerationType max_gen) const {
 710   return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
 711 }
 712 
 // Called when a new nmethod is created; delegates scavengable-root
 // tracking for the nmethod to ScavengableNMethods.
 713 void GenCollectedHeap::register_nmethod(nmethod* nm) {
 714   ScavengableNMethods::register_nmethod(nm);
 715 }
 716 
 // Counterpart of register_nmethod: removes the nmethod from the
 // ScavengableNMethods tracking when it goes away.
 717 void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
 718   ScavengableNMethods::unregister_nmethod(nm);
 719 }
 720 
 // Delegates verification of an nmethod's scavengable state to
 // ScavengableNMethods.
 721 void GenCollectedHeap::verify_nmethod(nmethod* nm) {
 722   ScavengableNMethods::verify_nmethod(nm);
 723 }
 724 
 // Flush hook — intentionally a no-op for this heap (unregister_nmethod
 // handles the ScavengableNMethods bookkeeping).
 725 void GenCollectedHeap::flush_nmethod(nmethod* nm) {
 726   // Do nothing.
 727 }
 728 
 // Drops nmethods that no longer need scavengable tracking; pure
 // delegation to ScavengableNMethods::prune_nmethods.
 729 void GenCollectedHeap::prune_nmethods() {
 730   ScavengableNMethods::prune_nmethods();
 731 }
 732 
 733 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
 734   GCCauseSetter x(this, GCCause::_allocation_failure);
 735   HeapWord* result = NULL;
 736 
 737   assert(size != 0, "Precondition violated");
 738   if (GCLocker::is_active_and_needs_gc()) {
 739     // GC locker is active; instead of a collection we will attempt
 740     // to expand the heap, if there's room for expansion.
 741     if (!is_maximal_no_gc()) {
 742       result = expand_heap_and_allocate(size, is_tlab);
 743     }
 744     return result;   // Could be null if we are out of space.
 745   } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
 746     // Do an incremental collection.
 747     do_collection(false,                     // full
 748                   false,                     // clear_all_soft_refs
 749                   size,                      // size


 854   }
 855   if (_process_strong_tasks->try_claim_task(GCH_PS_Management_oops_do)) {
 856     Management::oops_do(strong_roots);
 857   }
 858   if (_process_strong_tasks->try_claim_task(GCH_PS_jvmti_oops_do)) {
 859     JvmtiExport::oops_do(strong_roots);
 860   }
 861   if (UseAOT && _process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
 862     AOTLoader::oops_do(strong_roots);
 863   }
 864 
 865   if (_process_strong_tasks->try_claim_task(GCH_PS_SystemDictionary_oops_do)) {
 866     SystemDictionary::oops_do(strong_roots);
 867   }
 868 
 869   if (_process_strong_tasks->try_claim_task(GCH_PS_CodeCache_oops_do)) {
 870     if (so & SO_ScavengeCodeCache) {
 871       assert(code_roots != NULL, "must supply closure for code cache");
 872 
 873       // We only visit parts of the CodeCache when scavenging.
 874       ScavengableNMethods::scavengable_nmethods_do(code_roots);
 875     }
 876     if (so & SO_AllCodeCache) {
 877       assert(code_roots != NULL, "must supply closure for code cache");
 878 
 879       // CMSCollector uses this to do intermediate-strength collections.
 880       // We scan the entire code cache, since CodeCache::do_unloading is not called.
 881       CodeCache::blobs_do(code_roots);
 882     }
 883     // Verify that the code cache contents are not subject to
 884     // movement by a scavenging collection.
 885     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
 886     DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 887   }
 888 }
 889 
 890 void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
 891                                            OopsInGenClosure* root_closure,
 892                                            OopsInGenClosure* old_gen_closure,
 893                                            CLDClosure* cld_closure) {
 894   MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);




 709                                                  GenCollectedHeap::GenerationType max_gen) const {
 710   return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
 711 }
 712 
 // Called when a new nmethod is created; delegates scavengable-root
 // tracking for the nmethod to ScavengableNMethods.
 713 void GenCollectedHeap::register_nmethod(nmethod* nm) {
 714   ScavengableNMethods::register_nmethod(nm);
 715 }
 716 
 // Counterpart of register_nmethod: removes the nmethod from the
 // ScavengableNMethods tracking when it goes away.
 717 void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
 718   ScavengableNMethods::unregister_nmethod(nm);
 719 }
 720 
 // Delegates verification of an nmethod's scavengable state to
 // ScavengableNMethods.
 721 void GenCollectedHeap::verify_nmethod(nmethod* nm) {
 722   ScavengableNMethods::verify_nmethod(nm);
 723 }
 724 
 // Flush hook — intentionally a no-op for this heap (unregister_nmethod
 // handles the ScavengableNMethods bookkeeping).
 725 void GenCollectedHeap::flush_nmethod(nmethod* nm) {
 726   // Do nothing.
 727 }
 728 
 // Drops nmethods that no longer need scavengable tracking. Renamed from
 // prune_nmethods to make the "scavengable" scope explicit at call sites;
 // still delegates to ScavengableNMethods::prune_nmethods.
 729 void GenCollectedHeap::prune_scavengable_nmethods() {
 730   ScavengableNMethods::prune_nmethods();
 731 }
 732 
 733 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
 734   GCCauseSetter x(this, GCCause::_allocation_failure);
 735   HeapWord* result = NULL;
 736 
 737   assert(size != 0, "Precondition violated");
 738   if (GCLocker::is_active_and_needs_gc()) {
 739     // GC locker is active; instead of a collection we will attempt
 740     // to expand the heap, if there's room for expansion.
 741     if (!is_maximal_no_gc()) {
 742       result = expand_heap_and_allocate(size, is_tlab);
 743     }
 744     return result;   // Could be null if we are out of space.
 745   } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
 746     // Do an incremental collection.
 747     do_collection(false,                     // full
 748                   false,                     // clear_all_soft_refs
 749                   size,                      // size


 854   }
 855   if (_process_strong_tasks->try_claim_task(GCH_PS_Management_oops_do)) {
 856     Management::oops_do(strong_roots);
 857   }
 858   if (_process_strong_tasks->try_claim_task(GCH_PS_jvmti_oops_do)) {
 859     JvmtiExport::oops_do(strong_roots);
 860   }
 861   if (UseAOT && _process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
 862     AOTLoader::oops_do(strong_roots);
 863   }
 864 
 865   if (_process_strong_tasks->try_claim_task(GCH_PS_SystemDictionary_oops_do)) {
 866     SystemDictionary::oops_do(strong_roots);
 867   }
 868 
 869   if (_process_strong_tasks->try_claim_task(GCH_PS_CodeCache_oops_do)) {
 870     if (so & SO_ScavengeCodeCache) {
 871       assert(code_roots != NULL, "must supply closure for code cache");
 872 
 873       // We only visit parts of the CodeCache when scavenging.
 874       ScavengableNMethods::nmethods_do(code_roots);
 875     }
 876     if (so & SO_AllCodeCache) {
 877       assert(code_roots != NULL, "must supply closure for code cache");
 878 
 879       // CMSCollector uses this to do intermediate-strength collections.
 880       // We scan the entire code cache, since CodeCache::do_unloading is not called.
 881       CodeCache::blobs_do(code_roots);
 882     }
 883     // Verify that the code cache contents are not subject to
 884     // movement by a scavenging collection.
 885     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
 886     DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 887   }
 888 }
 889 
 890 void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
 891                                            OopsInGenClosure* root_closure,
 892                                            OopsInGenClosure* old_gen_closure,
 893                                            CLDClosure* cld_closure) {
 894   MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);


< prev index next >