4576 : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4577 void do_klass(Klass* klass) {
4578 // If the klass has not been dirtied we know that there's
4579 // no references into the young gen and we can skip it.
4580 if (!_process_only_dirty || klass->has_modified_oops()) {
4581 // Clean the klass since we're going to scavenge all the metadata.
4582 klass->clear_modified_oops();
4583
4584 // Tell the closure that this klass is the Klass to scavenge
4585 // and is the one to dirty if oops are left pointing into the young gen.
4586 _closure->set_scanned_klass(klass);
4587
4588 klass->oops_do(_closure);
4589
4590 _closure->set_scanned_klass(NULL);
4591 }
4592 _count++;
4593 }
4594 };
4595
4596 class G1ParTask : public AbstractGangTask {
4597 protected:
4598 G1CollectedHeap* _g1h;
4599 RefToScanQueueSet *_queues;
4600 ParallelTaskTerminator _terminator;
4601 uint _n_workers;
4602
4603 Mutex _stats_lock;
4604 Mutex* stats_lock() { return &_stats_lock; }
4605
4606 public:
4607 G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
4608 : AbstractGangTask("G1 collection"),
4609 _g1h(g1h),
4610 _queues(task_queues),
4611 _terminator(0, _queues),
4612 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4613 {}
4614
4615 RefToScanQueueSet* queues() { return _queues; }
4644 G1KlassScanClosure _klass_in_cld_closure;
4645 bool _claim;
4646
4647 public:
// Wires up the closure chain used when scanning a ClassLoaderData:
// the supplied copy closure handles the CLD's own oops, and a klass
// closure (wrapping an oop-in-klass closure built from the same
// g1/pss/rp triple) handles its klasses. 'only_young' restricts klass
// scanning to dirty klasses; 'claim' is forwarded to cld->oops_do.
G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
             bool only_young, bool claim)
  : _oop_closure(oop_closure),
    _oop_in_klass_closure(oop_closure->g1(),
                          oop_closure->pss(),
                          oop_closure->rp()),
    _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
    _claim(claim) {

}
4658
// Visit one ClassLoaderData: apply the oop closure to its oops and the
// klass closure to its klasses; _claim is passed as the claim flag.
void do_cld(ClassLoaderData* cld) {
  cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
}
4662 };
4663
4664 class G1CodeBlobClosure: public CodeBlobClosure {
4665 OopClosure* _f;
4666
4667 public:
4668 G1CodeBlobClosure(OopClosure* f) : _f(f) {}
4669 void do_code_blob(CodeBlob* blob) {
4670 nmethod* that = blob->as_nmethod_or_null();
4671 if (that != NULL) {
4672 if (!that->test_set_oops_do_mark()) {
4673 that->oops_do(_f);
4674 that->fix_oop_relocations();
4675 }
4676 }
4677 }
4678 };
4679
4680 void work(uint worker_id) {
4681 if (worker_id >= _n_workers) return; // no work needed this round
4682
4683 double start_time_ms = os::elapsedTime() * 1000.0;
4684 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4685
4686 {
4687 ResourceMark rm;
4688 HandleMark hm;
4689
4690 ReferenceProcessor* rp = _g1h->ref_processor_stw();
4691
4692 G1ParScanThreadState pss(_g1h, worker_id, rp);
4693 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4694
4695 pss.set_evac_failure_closure(&evac_failure_cl);
4696
4697 bool only_young = _g1h->g1_policy()->gcs_are_young();
4698
4699 // Non-IM young GC.
4850 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4851
4852 g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4853
4854 // During conc marking we have to filter the per-thread SATB buffers
4855 // to make sure we remove any oops into the CSet (which will show up
4856 // as implicitly live).
4857 double satb_filtering_ms = 0.0;
4858 if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
4859 if (mark_in_progress()) {
4860 double satb_filter_start = os::elapsedTime();
4861
4862 JavaThread::satb_mark_queue_set().filter_thread_buffers();
4863
4864 satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
4865 }
4866 }
4867 g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
4868
4869 // Now scan the complement of the collection set.
4870 MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);
4871
4872 g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4873
4874 _process_strong_tasks->all_tasks_completed();
4875 }
4876
4877 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4878 private:
4879 BoolObjectClosure* _is_alive;
4880 int _initial_string_table_size;
4881 int _initial_symbol_table_size;
4882
4883 bool _process_strings;
4884 int _strings_processed;
4885 int _strings_removed;
4886
4887 bool _process_symbols;
4888 int _symbols_processed;
4889 int _symbols_removed;
4890
5897
5898 // Weak root processing.
5899 {
5900 G1STWIsAliveClosure is_alive(this);
5901 G1KeepAliveClosure keep_alive(this);
5902 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5903 if (G1StringDedup::is_enabled()) {
5904 G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
5905 }
5906 }
5907
5908 release_gc_alloc_regions(n_workers, evacuation_info);
5909 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5910
5911 // Reset and re-enable the hot card cache.
5912 // Note the counts for the cards in the regions in the
5913 // collection set are reset when the collection set is freed.
5914 hot_card_cache->reset_hot_cache();
5915 hot_card_cache->set_use_cache(true);
5916
5917 // Migrate the strong code roots attached to each region in
5918 // the collection set. Ideally we would like to do this
5919 // after we have finished the scanning/evacuation of the
5920 // strong code roots for a particular heap region.
5921 migrate_strong_code_roots();
5922
5923 purge_code_root_memory();
5924
5925 if (g1_policy()->during_initial_mark_pause()) {
5926 // Reset the claim values set during marking the strong code roots
5927 reset_heap_region_claim_values();
5928 }
5929
5930 finalize_for_evac_failure();
5931
5932 if (evacuation_failed()) {
5933 remove_self_forwarding_pointers();
5934
5935 // Reset the G1EvacuationFailureALot counters and flags
5936 // Note: the values are reset only when an actual
5937 // evacuation failure occurs.
5938 NOT_PRODUCT(reset_evacuation_should_fail();)
5939 }
5940
5941 // Enqueue any remaining references remaining on the STW
5942 // reference processor's discovered lists. We need to do
6956 cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
6957 }
6958
6959 // Optimized nmethod scanning
6960
// Walks the oops of nmethod _nm and registers _nm as a strong code
// root with every heap region one of those oops points into.
class RegisterNMethodOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  nmethod* _nm;  // the nmethod being registered

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      // Code roots are attached to the humongous start region, never a
      // continuation region.
      assert(!hr->continuesHumongous(),
             err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
                     " starting at "HR_FORMAT,
                     _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));

      // HeapRegion::add_strong_code_root() avoids adding duplicate
      // entries but having duplicates is OK since we "mark" nmethods
      // as visited when we scan the strong code root lists during the GC.
      hr->add_strong_code_root(_nm);
      // Sanity: the addition must be visible in the region's remembered set.
      assert(hr->rem_set()->strong_code_roots_list_contains(_nm),
             err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT,
                     _nm, HR_FORMAT_PARAMS(hr)));
    }
  }

public:
  RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
    _g1h(g1h), _nm(nm) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
6992
// Mirror of RegisterNMethodOopClosure: walks the oops of nmethod _nm
// and removes _nm from the strong code roots of every region one of
// those oops points into.
class UnregisterNMethodOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  nmethod* _nm;  // the nmethod being unregistered

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      // Code roots live on the humongous start region, never a
      // continuation region.
      assert(!hr->continuesHumongous(),
             err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
                     " starting at "HR_FORMAT,
                     _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));

      hr->remove_strong_code_root(_nm);
      // Sanity: the removal must be reflected in the remembered set.
      assert(!hr->rem_set()->strong_code_roots_list_contains(_nm),
             err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT,
                     _nm, HR_FORMAT_PARAMS(hr)));
    }
  }

public:
  UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
    _g1h(g1h), _nm(nm) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
7021
7022 void G1CollectedHeap::register_nmethod(nmethod* nm) {
7023 CollectedHeap::register_nmethod(nm);
7024
7025 guarantee(nm != NULL, "sanity");
7026 RegisterNMethodOopClosure reg_cl(this, nm);
7027 nm->oops_do(®_cl);
7028 }
7029
7030 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
7031 CollectedHeap::unregister_nmethod(nm);
7032
7033 guarantee(nm != NULL, "sanity");
7034 UnregisterNMethodOopClosure reg_cl(this, nm);
7035 nm->oops_do(®_cl, true);
7036 }
7037
// Region closure that migrates the strong code roots of each region it
// visits; returning false keeps the iteration going over all regions.
class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion *hr) {
    // Humongous regions are never part of the collection set.
    assert(!hr->isHumongous(),
           err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
                   HR_FORMAT_PARAMS(hr)));
    hr->migrate_strong_code_roots();
    return false;
  }
};
7048
7049 void G1CollectedHeap::migrate_strong_code_roots() {
7050 MigrateCodeRootsHeapRegionClosure cl;
7051 double migrate_start = os::elapsedTime();
7052 collection_set_iterate(&cl);
7053 double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
7054 g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
7055 }
7056
7057 void G1CollectedHeap::purge_code_root_memory() {
7058 double purge_start = os::elapsedTime();
7059 G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
7060 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
7061 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
7062 }
7063
7064 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
7065 G1CollectedHeap* _g1h;
7066
7067 public:
7068 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
7069 _g1h(g1h) {}
7070
7071 void do_code_blob(CodeBlob* cb) {
7072 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
7073 if (nm == NULL) {
7074 return;
7075 }
7076
7077 if (ScavengeRootsInCode) {
7078 _g1h->register_nmethod(nm);
7079 }
|
4576 : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
// Scan the metadata of one klass, skipping clean klasses when only
// dirty ones need processing; counts every klass visited.
void do_klass(Klass* klass) {
  // If the klass has not been dirtied we know that there's
  // no references into the young gen and we can skip it.
  if (!_process_only_dirty || klass->has_modified_oops()) {
    // Clean the klass since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure that this klass is the Klass to scavenge
    // and is the one to dirty if oops are left pointing into the young gen.
    _closure->set_scanned_klass(klass);

    klass->oops_do(_closure);

    _closure->set_scanned_klass(NULL);
  }
  // Counted regardless of whether the klass was actually scanned.
  _count++;
}
4594 };
4595
// Scans the oops of each not-yet-visited nmethod and, as a side effect,
// records the nmethod as a strong code root of every region one of its
// oops points into.
class G1CodeBlobClosure : public CodeBlobClosure {
  // Applies the wrapped closure to each oop and then registers the
  // current nmethod with the region containing the (possibly updated) oop.
  class HeapRegionGatheringOopClosure : public OopClosure {
    G1CollectedHeap* _g1h;
    OopClosure* _work;  // underlying closure applied to every oop
    nmethod* _nm;       // nmethod currently being scanned (set via set_nm)

    template <typename T>
    void do_oop_work(T* p) {
      // Apply the wrapped closure first: it may update *p (evacuation).
      _work->do_oop(p);
      T oop_or_narrowoop = oopDesc::load_heap_oop(p);
      if (!oopDesc::is_null(oop_or_narrowoop)) {
        oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
        HeapRegion* hr = _g1h->heap_region_containing_raw(o);
        assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
        // lock_add_* takes the region's lock — presumably because GC
        // workers can gather code roots concurrently; confirm against
        // HeapRegion::lock_add_strong_code_root.
        hr->lock_add_strong_code_root(_nm);
      }
    }

  public:
    HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}

    void do_oop(oop* o) {
      do_oop_work(o);
    }

    void do_oop(narrowOop* o) {
      do_oop_work(o);
    }

    // Must be set before each nmethod is scanned.
    void set_nm(nmethod* nm) {
      _nm = nm;
    }
  };

  HeapRegionGatheringOopClosure _oc;
public:
  G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}

  // Visit a code blob: only nmethods are scanned, and each one at most
  // once per oops-do cycle (guarded by test_set_oops_do_mark()).
  void do_code_blob(CodeBlob* cb) {
    nmethod* that = cb->as_nmethod_or_null();
    if (that != NULL) {
      if (!that->test_set_oops_do_mark()) {
        _oc.set_nm(that);
        that->oops_do(&_oc);
        that->fix_oop_relocations();
      }
    }
  }
};
4645
4646 class G1ParTask : public AbstractGangTask {
4647 protected:
4648 G1CollectedHeap* _g1h;
4649 RefToScanQueueSet *_queues;
4650 ParallelTaskTerminator _terminator;
4651 uint _n_workers;
4652
4653 Mutex _stats_lock;
4654 Mutex* stats_lock() { return &_stats_lock; }
4655
4656 public:
4657 G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
4658 : AbstractGangTask("G1 collection"),
4659 _g1h(g1h),
4660 _queues(task_queues),
4661 _terminator(0, _queues),
4662 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4663 {}
4664
4665 RefToScanQueueSet* queues() { return _queues; }
4694 G1KlassScanClosure _klass_in_cld_closure;
4695 bool _claim;
4696
4697 public:
// Wires up the closure chain used when scanning a ClassLoaderData:
// the supplied copy closure handles the CLD's own oops, and a klass
// closure (wrapping an oop-in-klass closure built from the same
// g1/pss/rp triple) handles its klasses. 'only_young' restricts klass
// scanning to dirty klasses; 'claim' is forwarded to cld->oops_do.
G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
             bool only_young, bool claim)
  : _oop_closure(oop_closure),
    _oop_in_klass_closure(oop_closure->g1(),
                          oop_closure->pss(),
                          oop_closure->rp()),
    _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
    _claim(claim) {

}
4708
// Visit one ClassLoaderData: apply the oop closure to its oops and the
// klass closure to its klasses; _claim is passed as the claim flag.
void do_cld(ClassLoaderData* cld) {
  cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
}
4712 };
4713
4714 void work(uint worker_id) {
4715 if (worker_id >= _n_workers) return; // no work needed this round
4716
4717 double start_time_ms = os::elapsedTime() * 1000.0;
4718 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4719
4720 {
4721 ResourceMark rm;
4722 HandleMark hm;
4723
4724 ReferenceProcessor* rp = _g1h->ref_processor_stw();
4725
4726 G1ParScanThreadState pss(_g1h, worker_id, rp);
4727 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4728
4729 pss.set_evac_failure_closure(&evac_failure_cl);
4730
4731 bool only_young = _g1h->g1_policy()->gcs_are_young();
4732
4733 // Non-IM young GC.
4884 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4885
4886 g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4887
4888 // During conc marking we have to filter the per-thread SATB buffers
4889 // to make sure we remove any oops into the CSet (which will show up
4890 // as implicitly live).
4891 double satb_filtering_ms = 0.0;
4892 if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
4893 if (mark_in_progress()) {
4894 double satb_filter_start = os::elapsedTime();
4895
4896 JavaThread::satb_mark_queue_set().filter_thread_buffers();
4897
4898 satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
4899 }
4900 }
4901 g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
4902
4903 // Now scan the complement of the collection set.
4904 G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
4905
4906 g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4907
4908 _process_strong_tasks->all_tasks_completed();
4909 }
4910
4911 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4912 private:
4913 BoolObjectClosure* _is_alive;
4914 int _initial_string_table_size;
4915 int _initial_symbol_table_size;
4916
4917 bool _process_strings;
4918 int _strings_processed;
4919 int _strings_removed;
4920
4921 bool _process_symbols;
4922 int _symbols_processed;
4923 int _symbols_removed;
4924
5931
5932 // Weak root processing.
5933 {
5934 G1STWIsAliveClosure is_alive(this);
5935 G1KeepAliveClosure keep_alive(this);
5936 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5937 if (G1StringDedup::is_enabled()) {
5938 G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
5939 }
5940 }
5941
5942 release_gc_alloc_regions(n_workers, evacuation_info);
5943 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5944
5945 // Reset and re-enable the hot card cache.
5946 // Note the counts for the cards in the regions in the
5947 // collection set are reset when the collection set is freed.
5948 hot_card_cache->reset_hot_cache();
5949 hot_card_cache->set_use_cache(true);
5950
5951 purge_code_root_memory();
5952
5953 if (g1_policy()->during_initial_mark_pause()) {
5954 // Reset the claim values set during marking the strong code roots
5955 reset_heap_region_claim_values();
5956 }
5957
5958 finalize_for_evac_failure();
5959
5960 if (evacuation_failed()) {
5961 remove_self_forwarding_pointers();
5962
5963 // Reset the G1EvacuationFailureALot counters and flags
5964 // Note: the values are reset only when an actual
5965 // evacuation failure occurs.
5966 NOT_PRODUCT(reset_evacuation_should_fail();)
5967 }
5968
5969 // Enqueue any remaining references remaining on the STW
5970 // reference processor's discovered lists. We need to do
6984 cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
6985 }
6986
6987 // Optimized nmethod scanning
6988
// Walks the oops of nmethod _nm and registers _nm as a strong code
// root with every heap region one of those oops points into.
class RegisterNMethodOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  nmethod* _nm;  // the nmethod being registered

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      // Code roots are attached to the humongous start region, never a
      // continuation region.
      assert(!hr->continuesHumongous(),
             err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
                     " starting at "HR_FORMAT,
                     _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));

      // HeapRegion::add_strong_code_root() avoids adding duplicate
      // entries.
      hr->add_strong_code_root(_nm);
    }
  }

public:
  RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
    _g1h(g1h), _nm(nm) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
7016
// Mirror of RegisterNMethodOopClosure: walks the oops of nmethod _nm
// and removes _nm from the strong code roots of every region one of
// those oops points into.
class UnregisterNMethodOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  nmethod* _nm;  // the nmethod being unregistered

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      // Code roots live on the humongous start region, never a
      // continuation region.
      assert(!hr->continuesHumongous(),
             err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
                     " starting at "HR_FORMAT,
                     _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));

      hr->remove_strong_code_root(_nm);
    }
  }

public:
  UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
    _g1h(g1h), _nm(nm) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
7042
7043 void G1CollectedHeap::register_nmethod(nmethod* nm) {
7044 CollectedHeap::register_nmethod(nm);
7045
7046 guarantee(nm != NULL, "sanity");
7047 RegisterNMethodOopClosure reg_cl(this, nm);
7048 nm->oops_do(®_cl);
7049 }
7050
7051 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
7052 CollectedHeap::unregister_nmethod(nm);
7053
7054 guarantee(nm != NULL, "sanity");
7055 UnregisterNMethodOopClosure reg_cl(this, nm);
7056 nm->oops_do(®_cl, true);
7057 }
7058
7059 void G1CollectedHeap::purge_code_root_memory() {
7060 double purge_start = os::elapsedTime();
7061 G1CodeRootSet::purge();
7062 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
7063 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
7064 }
7065
7066 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
7067 G1CollectedHeap* _g1h;
7068
7069 public:
7070 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
7071 _g1h(g1h) {}
7072
7073 void do_code_blob(CodeBlob* cb) {
7074 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
7075 if (nm == NULL) {
7076 return;
7077 }
7078
7079 if (ScavengeRootsInCode) {
7080 _g1h->register_nmethod(nm);
7081 }
|