76 public:
// Wrap the oop closure that will be applied to each claimed nmethod's oops.
G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
78
79 void do_code_blob(CodeBlob* cb) {
80 nmethod* nm = cb->as_nmethod_or_null();
81 if (nm != NULL) {
82 if (!nm->test_set_oops_do_mark()) {
83 _oc.set_nm(nm);
84 nm->oops_do(&_oc);
85 nm->fix_oop_relocations();
86 }
87 }
88 }
89 };
90
91
92 void G1RootProcessor::worker_has_discovered_all_strong_classes() {
93 uint n_workers = _g1h->n_par_threads();
94 assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
95
96 uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
97 if (new_value == n_workers) {
98 // This thread is last. Notify the others.
99 MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
100 _lock.notify_all();
101 }
102 }
103
104 void G1RootProcessor::wait_until_all_strong_classes_discovered() {
105 uint n_workers = _g1h->n_par_threads();
106 assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
107
108 if ((uint)_n_workers_discovered_strong_classes != n_workers) {
109 MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
110 while ((uint)_n_workers_discovered_strong_classes != n_workers) {
111 _lock.wait(Mutex::_no_safepoint_check_flag, 0, false);
112 }
113 }
114 }
115
// Construct the root processor for the given heap.
// _process_strong_tasks tracks claiming of the individual strong-root
// subtasks; _lock (leaf-ranked) and _n_workers_discovered_strong_classes
// form the class-discovery barrier used during class unloading.
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h) :
    _g1h(g1h),
    _process_strong_tasks(new SubTasksDone(G1RP_PS_NumElements)),
    _srs(g1h),
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false),
    _n_workers_discovered_strong_classes(0) {}
122
123 void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
124 OopClosure* scan_non_heap_weak_roots,
125 CLDClosure* scan_strong_clds,
126 CLDClosure* scan_weak_clds,
127 bool trace_metadata,
128 uint worker_i) {
|
76 public:
// Store the oop closure to be applied to the oops of each nmethod visited.
G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
78
79 void do_code_blob(CodeBlob* cb) {
80 nmethod* nm = cb->as_nmethod_or_null();
81 if (nm != NULL) {
82 if (!nm->test_set_oops_do_mark()) {
83 _oc.set_nm(nm);
84 nm->oops_do(&_oc);
85 nm->fix_oop_relocations();
86 }
87 }
88 }
89 };
90
91
// Barrier arrival: called once per parallel worker when it has finished
// discovering strongly reachable classes; the last arrival wakes all
// threads blocked in wait_until_all_strong_classes_discovered().
void G1RootProcessor::worker_has_discovered_all_strong_classes() {
  uint n_workers = _g1h->n_par_threads();
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  // Serial pass guard: with n_par_threads() == 0 there is no barrier to
  // coordinate, and the counter (bumped to 1) could never equal
  // n_workers, so the rendezvous is skipped entirely.
  if (n_workers > 0) {
    // Atomically count this arrival; only the thread that observes the
    // final value takes the lock and notifies.
    uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
    if (new_value == n_workers) {
      // This thread is last. Notify the others.
      MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
      _lock.notify_all();
    }
  }
}
105
// Barrier wait: blocks until every parallel worker has reported via
// worker_has_discovered_all_strong_classes().
void G1RootProcessor::wait_until_all_strong_classes_discovered() {
  uint n_workers = _g1h->n_par_threads();
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  // n_workers > 0 guards the serial case (counter can never reach 0
  // workers); the unlocked counter test is a fast path, re-checked
  // under the monitor before every wait to close the race with the
  // notifier.
  if (n_workers > 0 && (uint)_n_workers_discovered_strong_classes != n_workers) {
    MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
    while ((uint)_n_workers_discovered_strong_classes != n_workers) {
      _lock.wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}
117
// Build the root processor for heap g1h. SubTasksDone lets workers
// claim each strong-root subtask exactly once; the leaf-ranked _lock
// together with _n_workers_discovered_strong_classes implements the
// strong-class discovery barrier used when unloading classes.
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h) :
    _g1h(g1h),
    _process_strong_tasks(new SubTasksDone(G1RP_PS_NumElements)),
    _srs(g1h),
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false),
    _n_workers_discovered_strong_classes(0) {}
124
125 void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
126 OopClosure* scan_non_heap_weak_roots,
127 CLDClosure* scan_strong_clds,
128 CLDClosure* scan_weak_clds,
129 bool trace_metadata,
130 uint worker_i) {
|