1 /* 2 * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 
21 * 22 */ 23 24 #include "precompiled.hpp" 25 #include "code/codeCache.hpp" 26 #include "code/nmethod.hpp" 27 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 28 #include "gc/shenandoah/shenandoahCodeRoots.hpp" 29 #include "gc/shenandoah/shenandoahUtils.hpp" 30 #include "memory/resourceArea.hpp" 31 #include "memory/universe.hpp" 32 #include "runtime/mutex.inline.hpp" 33 34 ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) { 35 _length = heaps->length(); 36 _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC); 37 for (int h = 0; h < _length; h++) { 38 _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h)); 39 } 40 } 41 42 ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() { 43 FREE_C_HEAP_ARRAY(ParallelCodeHeapIterator, _iters); 44 } 45 46 void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) { 47 for (int c = 0; c < _length; c++) { 48 _iters[c].parallel_blobs_do(f); 49 } 50 } 51 52 ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) : 53 _heap(heap), _claimed_idx(0), _finished(false) { 54 } 55 56 void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) { 57 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); 58 59 /* 60 * Parallel code heap walk. 61 * 62 * This code makes all threads scan all code heaps, but only one thread would execute the 63 * closure on given blob. This is achieved by recording the "claimed" blocks: if a thread 64 * had claimed the block, it can process all blobs in it. Others have to fast-forward to 65 * next attempt without processing. 66 * 67 * Late threads would return immediately if iterator is finished. 
68 */ 69 70 if (_finished) { 71 return; 72 } 73 74 int stride = 256; // educated guess 75 int stride_mask = stride - 1; 76 assert (is_power_of_2(stride), "sanity"); 77 78 int count = 0; 79 bool process_block = true; 80 81 for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) { 82 int current = count++; 83 if ((current & stride_mask) == 0) { 84 process_block = (current >= _claimed_idx) && 85 (Atomic::cmpxchg(current + stride, &_claimed_idx, current) == current); 86 } 87 if (process_block) { 88 if (cb->is_alive()) { 89 f->do_code_blob(cb); 90 #ifdef ASSERT 91 if (cb->is_nmethod()) 92 Universe::heap()->verify_nmethod((nmethod*)cb); 93 #endif 94 } 95 } 96 } 97 98 _finished = true; 99 } 100 101 class ShenandoahNMethodOopDetector : public OopClosure { 102 private: 103 ResourceMark rm; // For growable array allocation below. 104 GrowableArray<oop*> _oops; 105 106 public: 107 ShenandoahNMethodOopDetector() : _oops(10) {}; 108 109 void do_oop(oop* o) { 110 _oops.append(o); 111 } 112 void do_oop(narrowOop* o) { 113 fatal("NMethods should not have compressed oops embedded."); 114 } 115 116 GrowableArray<oop*>* oops() { 117 return &_oops; 118 } 119 120 bool has_oops() { 121 return !_oops.is_empty(); 122 } 123 }; 124 125 GrowableArray<ShenandoahNMethod*>* ShenandoahCodeRoots::_recorded_nms; 126 ShenandoahLock ShenandoahCodeRoots::_recorded_nms_lock; 127 128 void ShenandoahCodeRoots::initialize() { 129 _recorded_nms = new (ResourceObj::C_HEAP, mtGC) GrowableArray<ShenandoahNMethod*>(100, true, mtGC); 130 } 131 132 void ShenandoahCodeRoots::add_nmethod(nmethod* nm) { 133 switch (ShenandoahCodeRootsStyle) { 134 case 0: 135 case 1: 136 break; 137 case 2: { 138 assert_locked_or_safepoint(CodeCache_lock); 139 ShenandoahLocker locker(CodeCache_lock->owned_by_self() ? 
NULL : &_recorded_nms_lock); 140 141 ShenandoahNMethodOopDetector detector; 142 nm->oops_do(&detector); 143 144 if (detector.has_oops()) { 145 ShenandoahNMethod* nmr = new ShenandoahNMethod(nm, detector.oops()); 146 nmr->assert_alive_and_correct(); 147 int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod); 148 if (idx != -1) { 149 ShenandoahNMethod* old = _recorded_nms->at(idx); 150 _recorded_nms->at_put(idx, nmr); 151 delete old; 152 } else { 153 _recorded_nms->append(nmr); 154 } 155 } 156 break; 157 } 158 default: 159 ShouldNotReachHere(); 160 } 161 }; 162 163 void ShenandoahCodeRoots::remove_nmethod(nmethod* nm) { 164 switch (ShenandoahCodeRootsStyle) { 165 case 0: 166 case 1: { 167 break; 168 } 169 case 2: { 170 assert_locked_or_safepoint(CodeCache_lock); 171 ShenandoahLocker locker(CodeCache_lock->owned_by_self() ? NULL : &_recorded_nms_lock); 172 173 ShenandoahNMethodOopDetector detector; 174 nm->oops_do(&detector, /* allow_dead = */ true); 175 176 if (detector.has_oops()) { 177 int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod); 178 assert(idx != -1, "nmethod " PTR_FORMAT " should be registered", p2i(nm)); 179 ShenandoahNMethod* old = _recorded_nms->at(idx); 180 old->assert_same_oops(detector.oops()); 181 _recorded_nms->delete_at(idx); 182 delete old; 183 } 184 break; 185 } 186 default: 187 ShouldNotReachHere(); 188 } 189 } 190 191 ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() : 192 _heap(ShenandoahHeap::heap()), 193 _par_iterator(CodeCache::heaps()), 194 _claimed(0) { 195 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); 196 assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers"); 197 switch (ShenandoahCodeRootsStyle) { 198 case 0: 199 case 1: { 200 // No need to do anything here 201 break; 202 } 203 case 2: { 204 CodeCache_lock->lock_without_safepoint_check(); 205 break; 206 } 207 default: 208 ShouldNotReachHere(); 209 } 210 } 211 212 
// Releases the code cache pin taken by the constructor (style 2 only).
ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      CodeCache_lock->unlock();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

// Dispatch on ShenandoahCodeRootsStyle:
//   0 - serial walk of the whole code cache; one thread wins _seq_claimed.
//   1 - parallel code cache walk via the per-heap iterators.
//   2 - walk the pre-recorded nmethod table (fast path), optionally filtered
//       to nmethods holding collection-set oops (CSET_FILTER).
template<bool CSET_FILTER>
void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) {
  switch (ShenandoahCodeRootsStyle) {
    case 0: {
      if (_seq_claimed.try_set()) {
        CodeCache::blobs_do(f);
      }
      break;
    }
    case 1: {
      _par_iterator.parallel_blobs_do(f);
      break;
    }
    case 2: {
      ShenandoahCodeRootsIterator::fast_parallel_blobs_do<CSET_FILTER>(f);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

// Unfiltered variant: visit every recorded code root.
void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<false>(f);
}

// Filtered variant: visit only code roots with collection-set oops.
void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<true>(f);
}

// Parallel walk of the recorded nmethod table. Threads claim stride-sized
// index ranges with an atomic fetch-and-add on _claimed; each claimed range
// is processed by exactly one thread.
template <bool CSET_FILTER>
void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  size_t stride = 256; // educated guess

  GrowableArray<ShenandoahNMethod*>* list = ShenandoahCodeRoots::_recorded_nms;

  size_t max = (size_t)list->length();
  while (_claimed < max) {
    // Atomic::add returns the new value; subtract the stride back to get
    // the start of the range this thread just claimed.
    size_t cur = Atomic::add(stride, &_claimed) - stride;
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    // Another thread may have claimed past the end between the while-check
    // and the add; bail out if our range begins beyond the table.
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* nmr = list->at((int) idx);
      nmr->assert_alive_and_correct();

      if (CSET_FILTER && !nmr->has_cset_oops(_heap)) {
        continue;
      }

      f->do_code_blob(nmr->nm());
    }
  }
}
ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>* oops) { 287 _nm = nm; 288 _oops = NEW_C_HEAP_ARRAY(oop*, oops->length(), mtGC); 289 _oops_count = oops->length(); 290 for (int c = 0; c < _oops_count; c++) { 291 _oops[c] = oops->at(c); 292 } 293 } 294 295 ShenandoahNMethod::~ShenandoahNMethod() { 296 if (_oops != NULL) { 297 FREE_C_HEAP_ARRAY(oop*, _oops); 298 } 299 } 300 301 bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) { 302 for (int c = 0; c < _oops_count; c++) { 303 oop o = RawAccess<>::oop_load(_oops[c]); 304 if (heap->in_collection_set(o)) { 305 return true; 306 } 307 } 308 return false; 309 } 310 311 #ifdef ASSERT 312 void ShenandoahNMethod::assert_alive_and_correct() { 313 assert(_nm->is_alive(), "only alive nmethods here"); 314 assert(_oops_count > 0, "should have filtered nmethods without oops before"); 315 ShenandoahHeap* heap = ShenandoahHeap::heap(); 316 for (int c = 0; c < _oops_count; c++) { 317 oop *loc = _oops[c]; 318 assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*"); 319 oop o = RawAccess<>::oop_load(loc); 320 shenandoah_assert_correct_except(loc, o, 321 o == NULL || 322 heap->is_full_gc_move_in_progress() || 323 (VMThread::vm_operation() != NULL) && (VMThread::vm_operation()->type() == VM_Operation::VMOp_HeapWalkOperation) 324 ); 325 } 326 } 327 328 void ShenandoahNMethod::assert_same_oops(GrowableArray<oop*>* oops) { 329 assert(_oops_count == oops->length(), "should have the same number of oop*"); 330 for (int c = 0; c < _oops_count; c++) { 331 assert(_oops[c] == oops->at(c), "should be the same oop*"); 332 } 333 } 334 #endif