src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 4801 : imported patch code-movement
rev 4802 : imported patch optimize-nmethod-scanning
rev 4803 : imported patch thomas-comments


3332   void work(uint worker_id) {
3333     HandleMark hm;
3334     VerifyRegionClosure blk(true, _vo);
3335     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3336                                           _g1h->workers()->active_workers(),
3337                                           HeapRegion::ParVerifyClaimValue);
3338     if (blk.failures()) {
3339       _failures = true;
3340     }
3341   }
3342 };
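The chunked iteration above relies on HeapRegion claim values so that each region is verified by exactly one worker: a worker only processes a region whose claim word it successfully flips with a CAS. A standalone sketch of that protocol, using toy types and names rather than HotSpot's:

    #include <atomic>
    #include <cstdio>
    #include <vector>

    // Toy stand-in; HotSpot's HeapRegion stores a claim word that
    // workers update with a compare-and-swap.
    struct Region {
      std::atomic<int> claim;
      Region() : claim(0) {}
    };

    const int Unclaimed      = 0;
    const int ParVerifyClaim = 1;   // analogous to ParVerifyClaimValue

    void par_verify(std::vector<Region>& regions, int worker_id) {
      for (size_t i = 0; i < regions.size(); i++) {
        int expected = Unclaimed;
        // Only the worker that wins the CAS verifies this region, so
        // every region is processed exactly once across all workers.
        if (regions[i].claim.compare_exchange_strong(expected, ParVerifyClaim)) {
          std::printf("worker %d verifies region %zu\n", worker_id, i);
        }
      }
    }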
3343 
3344 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3345   if (SafepointSynchronize::is_at_safepoint()) {
3346     assert(Thread::current()->is_VM_thread(),
3347            "Expected to be executed serially by the VM thread at this point");
3348 
3349     if (!silent) { gclog_or_tty->print("Roots "); }
3350     VerifyRootsClosure rootsCl(vo);
3351     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3352     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl, /*do_marking=*/ false);
3353     VerifyKlassClosure klassCl(this, &rootsCl);
3354 
3355     // We apply the relevant closures to all the oops in the
3356     // system dictionary, the string table and the code cache.
3357     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3358 
3359     // Need cleared claim bits for the strong roots processing
3360     ClassLoaderDataGraph::clear_claimed_marks();
3361 
3362     process_strong_roots(true,      // activate StrongRootsScope
3363                          false,     // we set "is scavenging" to false,
3364                                     // so we don't reset the dirty cards.
3365                          ScanningOption(so),  // roots scanning options
3366                          &rootsCl,
3367                          &blobsCl,
3368                          &klassCl
3369                          );
3370 
3371     bool failures = rootsCl.failures() || codeRootsCl.failures();
3372 
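The `so` value combines the root-scanning options as a bitmask, and later code (for example the `!(so & SO_CodeCache)` test around line 5104 below) checks membership with a bitwise AND. A minimal self-contained illustration, with hypothetical flag values mirroring SharedHeap's SO_* constants:

    #include <cstdio>

    // Hypothetical stand-ins for SharedHeap's scanning-option flags.
    enum ScanningOption {
      SO_None       = 0x0,
      SO_AllClasses = 0x1,
      SO_Strings    = 0x2,
      SO_CodeCache  = 0x4
    };

    int main() {
      // Combine options with bitwise OR, as verify() does above.
      int so = SO_AllClasses | SO_Strings | SO_CodeCache;

      // Test a single option with bitwise AND, as the initial-mark
      // pause does when deciding whether to mark strong code roots.
      if (!(so & SO_CodeCache)) {
        std::printf("code cache not in the roots; mark code roots separately\n");
      } else {
        std::printf("code cache scanned as part of the strong roots\n");
      }
      return 0;
    }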


3860 
3861     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3862                                 workers()->active_workers() : 1);
3863     double pause_start_sec = os::elapsedTime();
3864     g1_policy()->phase_times()->note_gc_start(active_workers);
3865     log_gc_header();
3866 
3867     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3868     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3869 
3870     // If the secondary_free_list is not empty, append it to the
3871     // free_list. No need to wait for the cleanup operation to finish;
3872     // the region allocation code will check the secondary_free_list
3873     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3874     // set, skip this step so that the region allocation code has to
3875     // get entries from the secondary_free_list.
3876     if (!G1StressConcRegionFreeing) {
3877       append_secondary_free_list_if_not_empty_with_lock();
3878     }
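For readers unfamiliar with the two-list scheme: concurrent cleanup publishes reclaimed regions on the secondary list, and the pause (or the allocator, on demand) splices them onto the main free list under a lock. A rough sketch of that splice, with std::list standing in for G1's free-region list (illustrative only, not the real API):

    #include <list>
    #include <mutex>

    struct Region;  // opaque; we only move pointers between lists

    std::list<Region*> free_list;
    std::list<Region*> secondary_free_list;
    std::mutex         secondary_free_list_lock;

    void append_secondary_free_list_if_not_empty_with_lock() {
      std::lock_guard<std::mutex> guard(secondary_free_list_lock);
      if (!secondary_free_list.empty()) {
        // splice() relinks the nodes in O(1); nothing is copied and the
        // secondary list is left empty for the concurrent cleanup thread.
        free_list.splice(free_list.end(), secondary_free_list);
      }
    }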
3879 
3880     assert(check_young_list_well_formed(),
3881       "young list should be well formed");
3882 
3883     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3884              "sanity check");
3885 
3886     // Don't dynamically change the number of GC threads this early.  A value of
3887     // 0 is used to indicate serial work.  When parallel work is done,
3888     // it will be set.
3889 
3890     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3891       IsGCActiveMark x;
3892 
3893       gc_prologue(false);
3894       increment_total_collections(false /* full gc */);
3895       increment_gc_time_stamp();
3896 
3897       verify_before_gc();
3898 
3899       COMPILER2_PRESENT(DerivedPointerTable::clear());
3900 
3901       // Please see comment in g1CollectedHeap.hpp and
3902       // G1CollectedHeap::ref_processing_init() to see how


4968       G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
4969 
4970       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4971       G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
4972 
4973       bool only_young                 = _g1h->g1_policy()->gcs_are_young();
4974       G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
4975       G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
4976 
4977       OopClosure*                    scan_root_cl = &only_scan_root_cl;
4978       G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
4979 
4980       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4981         // We also need to mark copied objects.
4982         scan_root_cl = &scan_mark_root_cl;
4983         scan_klasses_cl = &scan_mark_klasses_cl_s;
4984       }
4985 
4986       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
4987 
4988       // Don't scan the code cache as part of strong root scanning. The code
4989       // roots that point into a region are scanned when we scan the RSet
4990       // of that region.

4991       int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
4992 
4993       pss.start_strong_roots();
4994       _g1h->g1_process_strong_roots(/* is scavenging */ true,
4995                                     SharedHeap::ScanningOption(so),
4996                                     scan_root_cl,
4997                                     &push_heap_rs_cl,
4998                                     scan_klasses_cl,
4999                                     worker_id);
5000       pss.end_strong_roots();
5001 
5002       {
5003         double start = os::elapsedTime();
5004         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
5005         evac.do_void();
5006         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
5007         double term_ms = pss.term_time()*1000.0;
5008         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
5009         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
5010       }
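The bookkeeping above charges `elapsed_ms - term_ms` as object-copy time: termination time is accumulated separately inside the evacuation loop, so it has to be subtracted back out. The same arithmetic in standalone form (std::chrono replacing os::elapsedTime(); the workload and the termination figure are made up):

    #include <chrono>
    #include <cstdio>

    int main() {
      using clock = std::chrono::steady_clock;
      auto start = clock::now();

      volatile double sink = 0.0;
      for (int i = 0; i < 5000000; i++) sink += i * 0.5;  // the "copying" work
      double term_ms = 0.05;  // assumed time spent in the termination protocol

      double elapsed_ms =
          std::chrono::duration<double, std::milli>(clock::now() - start).count();
      // Copy time is total phase time minus the separately-timed portion.
      std::printf("obj copy: %.3f ms, termination: %.3f ms\n",
                  elapsed_ms - term_ms, term_ms);
      return 0;
    }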


5082 
5083   // During conc marking we have to filter the per-thread SATB buffers
5084   // to make sure we remove any oops into the CSet (which will show up
5085   // as implicitly live).
5086   double satb_filtering_ms = 0.0;
5087   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
5088     if (mark_in_progress()) {
5089       double satb_filter_start = os::elapsedTime();
5090 
5091       JavaThread::satb_mark_queue_set().filter_thread_buffers();
5092 
5093       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
5094     }
5095   }
5096   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
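Filtering here means dropping buffer entries that point into the collection set: those objects are implicitly live and will be dealt with by evacuation, so enqueueing them for marking would be wasted work. A sketch of the filtering step over a toy buffer (the predicate is a hypothetical stand-in for the real membership test):

    #include <algorithm>
    #include <functional>
    #include <vector>

    typedef const void* oop_t;  // toy oop

    // Remove from a thread's SATB buffer every entry for which
    // is_in_cset() holds; the survivors still need concurrent marking.
    void filter_satb_buffer(std::vector<oop_t>& buf,
                            std::function<bool(oop_t)> is_in_cset) {
      buf.erase(std::remove_if(buf.begin(), buf.end(), is_in_cset),
                buf.end());
    }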
5097 
5098   // If this is an initial mark pause, and we're not scanning
5099   // the entire code cache, we need to mark the oops in the
5100   // strong code root lists for the regions that are not in
5101   // the collection set. 
5102   // Note all threads participate in this set of root tasks.
5103   double mark_strong_code_roots_ms = 0.0;
5104   if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
5105     double mark_strong_roots_start = os::elapsedTime();
5106     mark_strong_code_roots(worker_i);
5107     mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
5108   }
5109   g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
5110 
5111   // Now scan the complement of the collection set.
5112   if (scan_rs != NULL) {
5113     g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
5114   }
5115   _process_strong_tasks->all_tasks_completed();
5116 }
5117 
5118 void
5119 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
5120   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
5121   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
5122 }


6534   _humongous_set.verify_start();
6535   _free_list.verify_start();
6536 
6537   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
6538   heap_region_iterate(&cl);
6539 
6540   _old_set.verify_end();
6541   _humongous_set.verify_end();
6542   _free_list.verify_end();
6543 }
6544 
6545 // Optimized nmethod scanning
6546 
6547 class RegisterNMethodOopClosure: public OopClosure {
6548   G1CollectedHeap* _g1h;
6549   nmethod* _nm;
6550 
6551   template <class T> void do_oop_work(T* p) {
6552     T heap_oop = oopDesc::load_heap_oop(p);
6553     if (!oopDesc::is_null(heap_oop)) {
6554       HeapRegion* hr = _g1h->heap_region_containing(heap_oop);
6555       assert(!hr->isHumongous(), "nmethod oop in humongous?");

6556 
6557       // Note this may push duplicates but that is OK since
6558       // when we scan the nmethods during GC we "mark" them
6559       // as visited.
6560       hr->push_strong_code_root(_nm);
6561       assert(hr->strong_code_root_list()->contains(_nm), "push failed?");
6562     }
6563   }
6564 
6565 public:
6566   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6567     _g1h(g1h), _nm(nm) {}
6568 
6569   void do_oop(oop* p)       { do_oop_work(p); }
6570   void do_oop(narrowOop* p) { do_oop_work(p); }
6571 };
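The two do_oop() overloads funneling into one template body is the standard HotSpot closure idiom for supporting compressed oops. The sketch below shows the shape with toy types (it assumes a 64-bit build so `oop` and `narrowOop` are distinct types; the decode step is a simplified stand-in for the real one):

    #include <cstdint>

    typedef uintptr_t oop;        // full-width reference
    typedef uint32_t  narrowOop;  // compressed reference

    static const uintptr_t heap_base = 0x100000000ULL;

    inline oop decode(oop o)       { return o; }
    inline oop decode(narrowOop o) { return heap_base + ((uintptr_t)o << 3); }

    class CountNonNullOopClosure {
      int _count;

      // One template body serves both slot widths; overload resolution
      // on decode() handles the narrow case.
      template <class T> void do_oop_work(T* p) {
        if (*p != 0) {
          oop obj = decode(*p);
          (void)obj;  // a real closure would act on obj here
          _count++;
        }
      }

    public:
      CountNonNullOopClosure() : _count(0) {}
      void do_oop(oop* p)       { do_oop_work(p); }
      void do_oop(narrowOop* p) { do_oop_work(p); }
      int count() const { return _count; }
    };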
6572 
6573 class UnregisterNMethodOopClosure: public OopClosure {
6574   G1CollectedHeap* _g1h;
6575   nmethod* _nm;
6576 
6577   template <class T> void do_oop_work(T* p) {
6578     T heap_oop = oopDesc::load_heap_oop(p);
6579     if (!oopDesc::is_null(heap_oop)) {
6580       HeapRegion* hr = _g1h->heap_region_containing(heap_oop);
6581       assert(!hr->isHumongous(), "nmethod oop in humongous?");

6582       hr->remove_strong_code_root(_nm);
6583       assert(!hr->strong_code_root_list()->contains(_nm), "remove failed?");
6584     }
6585   }
6586 
6587 public:
6588   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6589     _g1h(g1h), _nm(nm) {}
6590 
6591   void do_oop(oop* p)       { do_oop_work(p); }
6592   void do_oop(narrowOop* p) { do_oop_work(p); }
6593 };
6594 
6595 void G1CollectedHeap::register_nmethod(nmethod* nm) {
6596   assert(nm != NULL, "sanity");
6597   if (nm == NULL) return;
6598 
6599   RegisterNMethodOopClosure reg_cl(this, nm);
6600   nm->oops_do(&reg_cl);
6601 }
6602 
6603 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
6604   assert(nm != NULL, "sanity");
6605   if (nm == NULL)  return;
6606 
6607   UnregisterNMethodOopClosure reg_cl(this, nm);
6608   nm->oops_do(&reg_cl);
6609 }
6610 
6611 class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
6612 public:
6613   bool doHeapRegion(HeapRegion *hr) {
6614     assert(!hr->isHumongous(), "humongous region in collection set?");
6615     hr->migrate_strong_code_roots();
6616     return false;
6617   }
6618 };
6619 
6620 void G1CollectedHeap::migrate_strong_code_roots() {
6621   MigrateCodeRootsHeapRegionClosure cl;
6622   double migrate_start = os::elapsedTime();
6623   collection_set_iterate(&cl);
6624   double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
6625   g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
6626 }
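One detail worth spelling out: doHeapRegion() returning false means "keep iterating", while a true return aborts the walk early, which is why MigrateCodeRootsHeapRegionClosure returns false unconditionally. A toy rendering of collection_set_iterate() under that convention:

    #include <vector>

    struct HeapRegion { int id; };

    struct HeapRegionClosure {
      // Return true to abort the iteration, false to continue.
      virtual bool doHeapRegion(HeapRegion* hr) = 0;
      virtual ~HeapRegionClosure() {}
    };

    void collection_set_iterate(std::vector<HeapRegion>& cset,
                                HeapRegionClosure* cl) {
      for (size_t i = 0; i < cset.size(); i++) {
        if (cl->doHeapRegion(&cset[i])) {
          break;  // closure asked to stop early
        }
      }
    }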
6627 













6628 class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
6629   // Note when we're marking the oops in the strong code roots lists
6630   // for regions not in the collection set, we do not want to use a
6631   // "marking" CodeBlobToOopClosure. We don't want to get in the following
6632   // situation if marking sees an nmethod whose oops span a couple
6633   // of regions (one in the collection set and the other not).
6634   //
6635   // A "marking" CodeBlobToOopClosure would end up "marking" the nmethod
6636   // by walking the code root list for a region not in the collection set.
6637   // When we come to scan the code root list for the region in the
6638   // collection set, we would skip the nmethod because it's already been
6639   // "marked" - potentially missing some roots.
6640 
6641   class MarkStrongCodeRootOopClosure: public OopClosure {
6642     ConcurrentMark* _cm;
6643     HeapRegion* _hr;
6644     uint _worker_id;
6645 
6646     template <class T> void do_oop_work(T* p) {
6647       T heap_oop = oopDesc::load_heap_oop(p);
6648       if (!oopDesc::is_null(heap_oop)) {
6649         oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6650         // Only mark objects in the region (which is assumed
6651         // not to be in the collection set).
6652         if (_hr->is_in(obj)) {
6653           _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
6654         }
6655       }
6656     }
6657 
6658   public:
6659     MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
6660       _cm(cm), _hr(hr), _worker_id(worker_id) {}


6661 
6662     void do_oop(narrowOop* p) { do_oop_work(p); }
6663     void do_oop(oop* p)       { do_oop_work(p); }
6664   };
6665 
6666   MarkStrongCodeRootOopClosure _oop_cl;
6667 
6668 public:
6669   MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
6670     _oop_cl(cm, hr, worker_id) {}
6671 
6672   void do_code_blob(CodeBlob* cb) {
6673     nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
6674     if (nm != NULL) {
6675       nm->oops_do(&_oop_cl);
6676     }
6677   }
6678 };
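do_code_blob() above tolerates blobs that are not nmethods by way of as_nmethod_or_null(), which acts as a null-returning downcast. A toy version of that pattern, with virtual dispatch standing in for HotSpot's real blob hierarchy:

    #include <cstdio>

    struct nmethod;

    struct CodeBlob {
      // Non-nmethod blobs (stubs, adapters, ...) answer NULL.
      virtual nmethod* as_nmethod_or_null() { return 0; }
      virtual ~CodeBlob() {}
    };

    struct nmethod : public CodeBlob {
      nmethod* as_nmethod_or_null() { return this; }
    };

    void do_code_blob(CodeBlob* cb) {
      nmethod* nm = (cb == 0) ? 0 : cb->as_nmethod_or_null();
      if (nm != 0) {
        std::printf("visiting the oops of an nmethod\n");
      }
    }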
6679 
6680 class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {




3332   void work(uint worker_id) {
3333     HandleMark hm;
3334     VerifyRegionClosure blk(true, _vo);
3335     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3336                                           _g1h->workers()->active_workers(),
3337                                           HeapRegion::ParVerifyClaimValue);
3338     if (blk.failures()) {
3339       _failures = true;
3340     }
3341   }
3342 };
3343 
3344 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3345   if (SafepointSynchronize::is_at_safepoint()) {
3346     assert(Thread::current()->is_VM_thread(),
3347            "Expected to be executed serially by the VM thread at this point");
3348 
3349     if (!silent) { gclog_or_tty->print("Roots "); }
3350     VerifyRootsClosure rootsCl(vo);
3351     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3352     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl, false /* do_marking */);
3353     VerifyKlassClosure klassCl(this, &rootsCl);
3354 
3355     // We apply the relevant closures to all the oops in the
3356     // system dictionary, the string table and the code cache.
3357     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3358 
3359     // Need cleared claim bits for the strong roots processing
3360     ClassLoaderDataGraph::clear_claimed_marks();
3361 
3362     process_strong_roots(true,      // activate StrongRootsScope
3363                          false,     // we set "is scavenging" to false,
3364                                     // so we don't reset the dirty cards.
3365                          ScanningOption(so),  // roots scanning options
3366                          &rootsCl,
3367                          &blobsCl,
3368                          &klassCl
3369                          );
3370 
3371     bool failures = rootsCl.failures() || codeRootsCl.failures();
3372 


3860 
3861     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3862                                 workers()->active_workers() : 1);
3863     double pause_start_sec = os::elapsedTime();
3864     g1_policy()->phase_times()->note_gc_start(active_workers);
3865     log_gc_header();
3866 
3867     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3868     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3869 
3870     // If the secondary_free_list is not empty, append it to the
3871     // free_list. No need to wait for the cleanup operation to finish;
3872     // the region allocation code will check the secondary_free_list
3873     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3874     // set, skip this step so that the region allocation code has to
3875     // get entries from the secondary_free_list.
3876     if (!G1StressConcRegionFreeing) {
3877       append_secondary_free_list_if_not_empty_with_lock();
3878     }
3879 
3880     assert(check_young_list_well_formed(), "young list should be well formed");


3881     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3882            "sanity check");
3883 
3884     // Don't dynamically change the number of GC threads this early.  A value of
3885     // 0 is used to indicate serial work.  When parallel work is done,
3886     // it will be set.
3887 
3888     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3889       IsGCActiveMark x;
3890 
3891       gc_prologue(false);
3892       increment_total_collections(false /* full gc */);
3893       increment_gc_time_stamp();
3894 
3895       verify_before_gc();
3896 
3897       COMPILER2_PRESENT(DerivedPointerTable::clear());
3898 
3899       // Please see comment in g1CollectedHeap.hpp and
3900       // G1CollectedHeap::ref_processing_init() to see how


4966       G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
4967 
4968       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4969       G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
4970 
4971       bool only_young                 = _g1h->g1_policy()->gcs_are_young();
4972       G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
4973       G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
4974 
4975       OopClosure*                    scan_root_cl = &only_scan_root_cl;
4976       G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
4977 
4978       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4979         // We also need to mark copied objects.
4980         scan_root_cl = &scan_mark_root_cl;
4981         scan_klasses_cl = &scan_mark_klasses_cl_s;
4982       }
4983 
4984       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
4985 
4986       // Don't scan the scavengable methods in the code cache as part
4987       // of strong root scanning. The code roots that point into a 
4988       // region in the collection set are scanned when we scan the
4989       // region's RSet.
4990       int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
4991 
4992       pss.start_strong_roots();
4993       _g1h->g1_process_strong_roots(/* is scavenging */ true,
4994                                     SharedHeap::ScanningOption(so),
4995                                     scan_root_cl,
4996                                     &push_heap_rs_cl,
4997                                     scan_klasses_cl,
4998                                     worker_id);
4999       pss.end_strong_roots();
5000 
5001       {
5002         double start = os::elapsedTime();
5003         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
5004         evac.do_void();
5005         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
5006         double term_ms = pss.term_time()*1000.0;
5007         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
5008         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
5009       }


5081 
5082   // During conc marking we have to filter the per-thread SATB buffers
5083   // to make sure we remove any oops into the CSet (which will show up
5084   // as implicitly live).
5085   double satb_filtering_ms = 0.0;
5086   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
5087     if (mark_in_progress()) {
5088       double satb_filter_start = os::elapsedTime();
5089 
5090       JavaThread::satb_mark_queue_set().filter_thread_buffers();
5091 
5092       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
5093     }
5094   }
5095   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
5096 
5097   // If this is an initial mark pause, and we're not scanning
5098   // the entire code cache, we need to mark the oops in the
5099   // strong code root lists for the regions that are not in
5100   // the collection set. 
5101   // Note all threads participate in this set of root tasks.
5102   double mark_strong_code_roots_ms = 0.0;
5103   if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
5104     double mark_strong_roots_start = os::elapsedTime();
5105     mark_strong_code_roots(worker_i);
5106     mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
5107   }
5108   g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
5109 
5110   // Now scan the complement of the collection set.
5111   if (scan_rs != NULL) {
5112     g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
5113   }
5114   _process_strong_tasks->all_tasks_completed();
5115 }
5116 
5117 void
5118 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
5119   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
5120   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
5121 }


6533   _humongous_set.verify_start();
6534   _free_list.verify_start();
6535 
6536   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
6537   heap_region_iterate(&cl);
6538 
6539   _old_set.verify_end();
6540   _humongous_set.verify_end();
6541   _free_list.verify_end();
6542 }
6543 
6544 // Optimized nmethod scanning
6545 
6546 class RegisterNMethodOopClosure: public OopClosure {
6547   G1CollectedHeap* _g1h;
6548   nmethod* _nm;
6549 
6550   template <class T> void do_oop_work(T* p) {
6551     T heap_oop = oopDesc::load_heap_oop(p);
6552     if (!oopDesc::is_null(heap_oop)) {
6553       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6554       HeapRegion* hr = _g1h->heap_region_containing(obj);
6555       assert(!hr->isHumongous(), "code root in humongous region?");
6556 
6557       // HeapRegion::add_strong_code_root() avoids adding duplicate
6558       // entries, but duplicates would be OK since we "mark" nmethods
6559       // as visited when we scan the strong code root lists during the GC.
6560       hr->add_strong_code_root(_nm);
6561       assert(hr->strong_code_root_list()->contains(_nm), "push failed?");
6562     }
6563   }
6564 
6565 public:
6566   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6567     _g1h(g1h), _nm(nm) {}
6568 
6569   void do_oop(oop* p)       { do_oop_work(p); }
6570   void do_oop(narrowOop* p) { do_oop_work(p); }
6571 };
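A compact model of the per-region bookkeeping that add_strong_code_root() / remove_strong_code_root() perform, with std::vector standing in for the region's growable code-root list (toy types; the linear find mirrors the duplicate check described in the comment above):

    #include <algorithm>
    #include <vector>

    typedef const void* nmethod_t;  // toy stand-in for nmethod*

    class RegionCodeRoots {
      std::vector<nmethod_t> _list;
    public:
      void add(nmethod_t nm) {
        // Skip duplicates; even if one slipped in it would be harmless,
        // since nmethods are "marked" as visited while being scanned.
        if (std::find(_list.begin(), _list.end(), nm) == _list.end()) {
          _list.push_back(nm);
        }
      }
      void remove(nmethod_t nm) {
        _list.erase(std::remove(_list.begin(), _list.end(), nm), _list.end());
      }
      bool contains(nmethod_t nm) const {
        return std::find(_list.begin(), _list.end(), nm) != _list.end();
      }
    };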
6572 
6573 class UnregisterNMethodOopClosure: public OopClosure {
6574   G1CollectedHeap* _g1h;
6575   nmethod* _nm;
6576 
6577   template <class T> void do_oop_work(T* p) {
6578     T heap_oop = oopDesc::load_heap_oop(p);
6579     if (!oopDesc::is_null(heap_oop)) {
6580       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6581       HeapRegion* hr = _g1h->heap_region_containing(obj);
6582       assert(!hr->isHumongous(), "code root in humongous region?");
6583       hr->remove_strong_code_root(_nm);
6584       assert(!hr->strong_code_root_list()->contains(_nm), "remove failed?");
6585     }
6586   }
6587 
6588 public:
6589   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6590     _g1h(g1h), _nm(nm) {}
6591 
6592   void do_oop(oop* p)       { do_oop_work(p); }
6593   void do_oop(narrowOop* p) { do_oop_work(p); }
6594 };
6595 
6596 void G1CollectedHeap::register_nmethod(nmethod* nm) {
6597   guarantee(nm != NULL, "sanity");


6598   RegisterNMethodOopClosure reg_cl(this, nm);
6599   nm->oops_do(&reg_cl);
6600 }
6601 
6602 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
6603   guarantee(nm != NULL, "sanity");


6604   UnregisterNMethodOopClosure reg_cl(this, nm);
6605   nm->oops_do(&reg_cl);
6606 }
6607 
6608 class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
6609 public:
6610   bool doHeapRegion(HeapRegion *hr) {
6611     assert(!hr->isHumongous(), "humongous region in collection set?");
6612     hr->migrate_strong_code_roots();
6613     return false;
6614   }
6615 };
6616 
6617 void G1CollectedHeap::migrate_strong_code_roots() {
6618   MigrateCodeRootsHeapRegionClosure cl;
6619   double migrate_start = os::elapsedTime();
6620   collection_set_iterate(&cl);
6621   double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
6622   g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
6623 }
6624 
6625 // Mark all the code roots that point into regions *not* in the
6626 // collection set.
6627 //
6628 // Note we do not want to use a "marking" CodeBlobToOopClosure while
6629 // walking the code root lists of regions not in the collection
6630 // set. Suppose we have an nmethod (M) that points to objects in two
6631 // separate regions - one in the collection set (R1) and one not (R2).
6632 // Using a "marking" CodeBlobToOopClosure here would result in "marking"
6633 // nmethod M when walking the code roots for R2 (the region that is not
6634 // in the collection set). When we come to scan the code roots for R1,
6635 // we would see that M is already marked and it would be skipped, and
6636 // the objects in R1 that are referenced from M would not be evacuated.
6637 
6638 class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {











6639 
6640   class MarkStrongCodeRootOopClosure: public OopClosure {
6641     ConcurrentMark* _cm;
6642     HeapRegion* _hr;
6643     uint _worker_id;
6644 
6645     template <class T> void do_oop_work(T* p) {
6646       T heap_oop = oopDesc::load_heap_oop(p);
6647       if (!oopDesc::is_null(heap_oop)) {
6648         oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6649         // Only mark objects in the region (which is assumed
6650         // not to be in the collection set).
6651         if (_hr->is_in(obj)) {
6652           _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
6653         }
6654       }
6655     }
6656 
6657   public:
6658     MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
6659       _cm(cm), _hr(hr), _worker_id(worker_id) {
6660       assert(!_hr->in_collection_set(), "sanity");
6661     }
6662 
6663     void do_oop(narrowOop* p) { do_oop_work(p); }
6664     void do_oop(oop* p)       { do_oop_work(p); }
6665   };
6666 
6667   MarkStrongCodeRootOopClosure _oop_cl;
6668 
6669 public:
6670   MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
6671     _oop_cl(cm, hr, worker_id) {}
6672 
6673   void do_code_blob(CodeBlob* cb) {
6674     nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
6675     if (nm != NULL) {
6676       nm->oops_do(&_oop_cl);
6677     }
6678   }
6679 };
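The skip hazard described in the comment above can be demonstrated in a few lines: once a shared blob is "marked" as visited during the first list walk, the second walk never processes it for its own region. A toy reproduction, with std::set playing the role of the visited mark:

    #include <cstdio>
    #include <set>
    #include <vector>

    struct Blob { const char* name; };

    void scan_marking(const std::vector<Blob*>& roots,
                      std::set<Blob*>& visited, const char* region) {
      for (size_t i = 0; i < roots.size(); i++) {
        if (visited.insert(roots[i]).second) {
          std::printf("scanned %s for %s\n", roots[i]->name, region);
        }
        // else: already "marked", so this region's roots are missed
      }
    }

    int main() {
      Blob m = { "nmethod M" };
      std::vector<Blob*> r2_roots(1, &m);  // region not in the cset
      std::vector<Blob*> r1_roots(1, &m);  // region in the cset
      std::set<Blob*> visited;
      scan_marking(r2_roots, visited, "R2");  // marks M
      scan_marking(r1_roots, visited, "R1");  // M silently skipped
      return 0;
    }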
6680 
6681 class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {