1 /* 2 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "memory/resourceArea.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "logging/log.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"

// Task that unlinks dead entries from the StringTable and/or SymbolTable in
// parallel. Per-worker counts are accumulated atomically into the shared
// totals; the summary is logged from the destructor.
StringSymbolTableUnlinkTask::StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
  AbstractGangTask("String/Symbol Unlinking"),
  _is_alive(is_alive),
  _par_state_string(StringTable::weak_storage()),
  _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
  _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {

  // Remember the initial sizes so the destructor can check that the whole
  // symbol table was claimed, and reset the symbol table's parallel claim
  // cursor before workers start.
  _initial_string_table_size = (int) StringTable::the_table()->table_size();
  _initial_symbol_table_size = SymbolTable::the_table()->table_size();
  if (process_symbols) {
    SymbolTable::clear_parallel_claimed_index();
  }
}

StringSymbolTableUnlinkTask::~StringSymbolTableUnlinkTask() {
  // If symbols were processed, the claim cursor must have advanced past the
  // whole table — otherwise some buckets were never visited.
  guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
            "claim value %d after unlink less than initial symbol table size %d",
            SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);

  log_info(gc, stringtable)(
      "Cleaned string and symbol table, "
      "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
      "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
      strings_processed(), strings_removed(),
      symbols_processed(), symbols_removed());
}

// Per-worker body: unlink a slice of each table, then flush the local
// counters into the shared totals with atomic adds.
void StringSymbolTableUnlinkTask::work(uint worker_id) {
  int strings_processed = 0;
  int strings_removed = 0;
  int symbols_processed = 0;
  int symbols_removed = 0;
  if (_process_strings) {
    StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed);
    Atomic::add(strings_processed, &_strings_processed);
    Atomic::add(strings_removed, &_strings_removed);
  }
  if (_process_symbols) {
    SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
    Atomic::add(symbols_processed, &_symbols_processed);
    Atomic::add(symbols_removed, &_symbols_removed);
  }
}

// Accessors for the accumulated totals (read after the gang has finished).
size_t StringSymbolTableUnlinkTask::strings_processed() const { return (size_t)_strings_processed; }
size_t StringSymbolTableUnlinkTask::strings_removed() const { return (size_t)_strings_removed; }

size_t StringSymbolTableUnlinkTask::symbols_processed() const { return (size_t)_symbols_processed; }
size_t StringSymbolTableUnlinkTask::symbols_removed() const { return (size_t)_symbols_removed; }


// Monitor backing the two-pass barrier in barrier_mark()/barrier_wait();
// never safepoint-checks, since workers are GC threads.
Monitor* CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never);

CodeCacheUnloadingTask::CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
  _is_alive(is_alive),
  _unloading_occurred(unloading_occurred),
  _num_workers(num_workers),
  _first_nmethod(NULL),
  _claimed_nmethod(NULL),
  _postponed_list(NULL),
  _num_entered_barrier(0)
{
  // Advance the global clock so already-visited nmethods can be told apart
  // from not-yet-visited ones during this cleaning cycle.
  CompiledMethod::increase_unloading_clock();
  // Get first alive nmethod
  CompiledMethodIterator iter = CompiledMethodIterator();
  if(iter.next_alive()) {
    _first_nmethod = iter.method();
  }
  // Seed the claim cursor; workers advance it with CAS in claim_nmethods().
  _claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
}

CodeCacheUnloadingTask::~CodeCacheUnloadingTask() {
  CodeCache::verify_clean_inline_caches();

  CodeCache::set_needs_cache_clean(false);
  guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");

  CodeCache::verify_icholder_relocations();
}

// Lock-free push of nm onto the postponed list: link nm to the current head,
// then CAS the head; retry if another worker changed the head meanwhile.
void CodeCacheUnloadingTask::add_to_postponed_list(CompiledMethod* nm) {
  CompiledMethod* old;
  do {
    old = (CompiledMethod*)_postponed_list;
    nm->set_unloading_next(old);
  } while ((CompiledMethod*)Atomic::cmpxchg(nm, &_postponed_list, old) != old);
}

// First-pass cleaning of a single nmethod; work that depends on the liveness
// of other nmethods is deferred to the postponed list.
void CodeCacheUnloadingTask::clean_nmethod(CompiledMethod* nm) {
  bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);

  if (postponed) {
    // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
    add_to_postponed_list(nm);
  }

  // Mark that this nmethod has been cleaned/unloaded.
  // After this call, it will be safe to ask if this nmethod was unloaded or not.
  nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
}

// Second-pass cleaning, run after the barrier when all first passes are done.
void CodeCacheUnloadingTask::clean_nmethod_postponed(CompiledMethod* nm) {
  nm->do_unloading_parallel_postponed();
}

// Claim a batch of up to MaxClaimNmethods alive nmethods by advancing the
// shared cursor with CAS; on CAS failure another worker won the batch and we
// retry from the new cursor position.
void CodeCacheUnloadingTask::claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) {
  CompiledMethod* first;
  CompiledMethodIterator last;

  do {
    *num_claimed_nmethods = 0;

    first = (CompiledMethod*)_claimed_nmethod;
    last = CompiledMethodIterator(first);

    if (first != NULL) {

      for (int i = 0; i < MaxClaimNmethods; i++) {
        if (!last.next_alive()) {
          break;
        }
        claimed_nmethods[i] = last.method();
        (*num_claimed_nmethods)++;
      }
    }

  } while ((CompiledMethod*)Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first);
}

// Lock-free pop from the postponed list; returns NULL when the list is empty.
CompiledMethod* CodeCacheUnloadingTask::claim_postponed_nmethod() {
  CompiledMethod* claim;
  CompiledMethod* next;

  do {
    claim = (CompiledMethod*)_postponed_list;
    if (claim == NULL) {
      return NULL;
    }

    next = claim->unloading_next();

  } while ((CompiledMethod*)Atomic::cmpxchg(next, &_postponed_list, claim) != claim);

  return claim;
}

// Mark that we're done with the first pass of nmethod cleaning.
// Mark this worker as having finished its first pass; the last worker to
// arrive wakes up everyone blocked in barrier_wait().
void CodeCacheUnloadingTask::barrier_mark(uint worker_id) {
  MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
  _num_entered_barrier++;
  if (_num_entered_barrier == _num_workers) {
    ml.notify_all();
  }
}

// See if we have to wait for the other workers to
// finish their first-pass nmethod cleaning work.
// NOTE(review): the first read of _num_entered_barrier is done without the
// lock; presumably a benign fast-path race since the locked re-check inside
// governs the actual wait — confirm against the Monitor's memory semantics.
void CodeCacheUnloadingTask::barrier_wait(uint worker_id) {
  if (_num_entered_barrier < _num_workers) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    while (_num_entered_barrier < _num_workers) {
      ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}

// Cleaning and unloading of nmethods. Some work has to be postponed
// to the second pass, when we know which nmethods survive.
void CodeCacheUnloadingTask::work_first_pass(uint worker_id) {
  // The first nmethod is claimed by the first worker.
  if (worker_id == 0 && _first_nmethod != NULL) {
    clean_nmethod(_first_nmethod);
    _first_nmethod = NULL;
  }

  int num_claimed_nmethods;
  CompiledMethod* claimed_nmethods[MaxClaimNmethods];

  // Repeatedly claim batches of nmethods until the code cache is exhausted.
  while (true) {
    claim_nmethods(claimed_nmethods, &num_claimed_nmethods);

    if (num_claimed_nmethods == 0) {
      break;
    }

    for (int i = 0; i < num_claimed_nmethods; i++) {
      clean_nmethod(claimed_nmethods[i]);
    }
  }
}

void CodeCacheUnloadingTask::work_second_pass(uint worker_id) {
  CompiledMethod* nm;
  // Take care of postponed nmethods.
  while ((nm = claim_postponed_nmethod()) != NULL) {
    clean_nmethod_postponed(nm);
  }
}

// Task that cleans weak links out of klasses; the subklass/sibling tree is
// claimed by one worker, while all workers share the per-klass cleaning.
KlassCleaningTask::KlassCleaningTask(BoolObjectClosure* is_alive) :
  _is_alive(is_alive),
  _clean_klass_tree_claimed(0),
  _klass_iterator() {
}

// Returns true for exactly one caller: the CAS from 0 to 1 succeeds only
// once; the unlocked pre-check is just a cheap early out.
bool KlassCleaningTask::claim_clean_klass_tree_task() {
  if (_clean_klass_tree_claimed) {
    return false;
  }

  return Atomic::cmpxchg(1, &_clean_klass_tree_claimed, 0) == 0;
}

// Advance the shared klass iterator until the next InstanceKlass (or NULL
// at the end of the iteration).
InstanceKlass* KlassCleaningTask::claim_next_klass() {
  Klass* klass;
  do {
    klass = _klass_iterator.next_klass();
  } while (klass != NULL && !klass->is_instance_klass());

  // this can be null so don't call InstanceKlass::cast
  return static_cast<InstanceKlass*>(klass);
}

void KlassCleaningTask::clean_klass(InstanceKlass* ik) {
  ik->clean_weak_instanceklass_links();
}

void KlassCleaningTask::work() {
  ResourceMark rm;

  // One worker will clean the subklass/sibling klass tree.
  if (claim_clean_klass_tree_task()) {
    Klass::clean_subklass_tree();
  }

  // All workers will help cleaning the classes.
  InstanceKlass* klass;
  while ((klass = claim_next_klass()) != NULL) {
    clean_klass(klass);
  }
}

// One-shot claim, same CAS pattern as claim_clean_klass_tree_task().
bool ResolvedMethodCleaningTask::claim_resolved_method_task() {
  if (_resolved_method_task_claimed) {
    return false;
  }
  return Atomic::cmpxchg(1, &_resolved_method_task_claimed, 0) == 0;
}

// These aren't big, one thread can do it all.
// Unlink dead entries from the ResolvedMethodTable; only the single worker
// that wins the claim does the work.
void ResolvedMethodCleaningTask::work() {
  if (claim_resolved_method_task()) {
    ResolvedMethodTable::unlink();
  }
}

// Umbrella gang task that composes all cleaning sub-tasks; each worker runs
// every sub-task, with a barrier separating the two code cache passes.
ParallelCleaningTask::ParallelCleaningTask(BoolObjectClosure* is_alive,
                                          bool process_strings,
                                          bool process_symbols,
                                          uint num_workers,
                                          bool unloading_occurred) :
  AbstractGangTask("Parallel Cleaning"),
  _string_symbol_task(is_alive, process_strings, process_symbols),
  _code_cache_task(num_workers, is_alive, unloading_occurred),
  _klass_cleaning_task(is_alive),
  _resolved_method_cleaning_task(is_alive)
{
}

// The parallel work done by all worker threads.
// The phase order matters: the second code cache pass must not start until
// every worker has finished the first pass (enforced by mark/wait); the
// other sub-tasks are run between the two passes to overlap useful work
// with the barrier.
void ParallelCleaningTask::work(uint worker_id) {
  // Do first pass of code cache cleaning.
  _code_cache_task.work_first_pass(worker_id);

  // Let the threads mark that the first pass is done.
  _code_cache_task.barrier_mark(worker_id);

  // Clean the Strings and Symbols.
  _string_symbol_task.work(worker_id);

  // Clean unreferenced things in the ResolvedMethodTable
  _resolved_method_cleaning_task.work();

  // Wait for all workers to finish the first code cache cleaning pass.
  _code_cache_task.barrier_wait(worker_id);

  // Do the second code cache cleaning work, which relies on
  // the liveness information gathered during the first pass.
  _code_cache_task.work_second_pass(worker_id);

  // Clean all klasses that were not unloaded.
  _klass_cleaning_task.work();
}