137 "Not in range.");
138 }
139
140 SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
141 : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
142 {
143 if (_active) {
144 _sh->register_strong_roots_scope(this);
145 _sh->change_strong_roots_parity();
146 // Zero the claimed high water mark in the StringTable
147 StringTable::clear_parallel_claimed_index();
148 }
149 }
150
151 SharedHeap::StrongRootsScope::~StrongRootsScope() {
152 if (_active) {
153 _sh->unregister_strong_roots_scope(this);
154 }
155 }
156
157 Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
158
159 void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
160 // The Thread work barrier is only needed by G1 Class Unloading.
161 // No need to use the barrier if this is single-threaded code.
162 if (UseG1GC && ClassUnloadingWithConcurrentMark && n_workers > 0) {
163 uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
164 if (new_value == n_workers) {
165 // This thread is last. Notify the others.
166 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
167 _lock->notify_all();
168 }
169 }
170 }
171
172 void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
173 assert(UseG1GC, "Currently only used by G1");
174 assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
175
176 // No need to use the barrier if this is single-threaded code.
177 if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
|
137 "Not in range.");
138 }
139
// Establish a strong-roots processing scope. When 'activate' is false
// (recorded by MarkScope in _active) construction is a no-op;
// otherwise the scope registers itself with the heap, changes the
// strong-roots claim parity, and resets the StringTable's parallel
// claim index.
140 SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
141 : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
142 {
143 if (_active) {
144 _sh->register_strong_roots_scope(this);
145 _sh->change_strong_roots_parity();
146 // Zero the claimed high water mark in the StringTable
147 StringTable::clear_parallel_claimed_index();
148 }
149 }
150
// Tear down the scope: an active scope (and only an active scope)
// deregisters itself from the heap, mirroring the constructor.
151 SharedHeap::StrongRootsScope::~StrongRootsScope() {
152 if (_active) {
153 _sh->unregister_strong_roots_scope(this);
154 }
155 }
156
// Global monitor backing the worker "done with Java threads" barrier
// (see mark_worker_done_with_threads / wait_until_all_workers_done_-
// with_threads). Declared never-safepoint-checking, which matches the
// Mutex::_no_safepoint_check_flag used at every acquisition site.
157 Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false, Monitor::_safepoint_check_never);
158
// Record that the calling worker has finished processing the Java
// thread roots; the last of 'n_workers' workers to arrive notifies
// any threads blocked on _lock.
159 void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
160 // The Thread work barrier is only needed by G1 Class Unloading.
161 // No need to use the barrier if this is single-threaded code.
162 if (UseG1GC && ClassUnloadingWithConcurrentMark && n_workers > 0) {
// Atomically count this worker as done; Atomic::add returns the
// post-increment value, so new_value == n_workers identifies the
// last worker through the barrier.
163 uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
164 if (new_value == n_workers) {
165 // This thread is last. Notify the others.
166 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
167 _lock->notify_all();
168 }
169 }
170 }
171
172 void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
173 assert(UseG1GC, "Currently only used by G1");
174 assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
175
176 // No need to use the barrier if this is single-threaded code.
177 if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
|