1 /* 2 * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 
21 * 22 */ 23 24 #include "precompiled.hpp" 25 #include "code/codeCache.hpp" 26 #include "code/nmethod.hpp" 27 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 28 #include "gc/shenandoah/shenandoahCodeRoots.hpp" 29 #include "gc/shenandoah/shenandoahUtils.hpp" 30 #include "memory/resourceArea.hpp" 31 32 ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) { 33 _length = heaps->length(); 34 _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC); 35 for (int h = 0; h < _length; h++) { 36 _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h)); 37 } 38 } 39 40 ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() { 41 FREE_C_HEAP_ARRAY(ParallelCodeHeapIterator, _iters); 42 } 43 44 void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) { 45 for (int c = 0; c < _length; c++) { 46 _iters[c].parallel_blobs_do(f); 47 } 48 } 49 50 ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) : 51 _heap(heap), _claimed_idx(0), _finished(false) { 52 } 53 54 void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) { 55 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); 56 57 /* 58 * Parallel code heap walk. 59 * 60 * This code makes all threads scan all code heaps, but only one thread would execute the 61 * closure on given blob. This is achieved by recording the "claimed" blocks: if a thread 62 * had claimed the block, it can process all blobs in it. Others have to fast-forward to 63 * next attempt without processing. 64 * 65 * Late threads would return immediately if iterator is finished. 
66 */ 67 68 if (_finished) { 69 return; 70 } 71 72 int stride = 256; // educated guess 73 int stride_mask = stride - 1; 74 assert (is_power_of_2(stride), "sanity"); 75 76 int count = 0; 77 bool process_block = true; 78 79 for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) { 80 int current = count++; 81 if ((current & stride_mask) == 0) { 82 process_block = (current >= _claimed_idx) && 83 (Atomic::cmpxchg(current + stride, &_claimed_idx, current) == current); 84 } 85 if (process_block) { 86 if (cb->is_alive()) { 87 f->do_code_blob(cb); 88 #ifdef ASSERT 89 if (cb->is_nmethod()) 90 Universe::heap()->verify_nmethod((nmethod*)cb); 91 #endif 92 } 93 } 94 } 95 96 _finished = true; 97 } 98 99 class ShenandoahNMethodOopDetector : public OopClosure { 100 private: 101 ResourceMark rm; // For growable array allocation below. 102 GrowableArray<oop*> _oops; 103 104 public: 105 ShenandoahNMethodOopDetector() : _oops(10) {}; 106 107 void do_oop(oop* o) { 108 _oops.append(o); 109 } 110 void do_oop(narrowOop* o) { 111 fatal("NMethods should not have compressed oops embedded."); 112 } 113 114 GrowableArray<oop*>* oops() { 115 return &_oops; 116 } 117 118 bool has_oops() { 119 return !_oops.is_empty(); 120 } 121 }; 122 123 GrowableArray<ShenandoahNMethod*>* ShenandoahCodeRoots::_recorded_nms; 124 ShenandoahLock ShenandoahCodeRoots::_recorded_nms_lock; 125 126 void ShenandoahCodeRoots::initialize() { 127 _recorded_nms = new (ResourceObj::C_HEAP, mtGC) GrowableArray<ShenandoahNMethod*>(100, true, mtGC); 128 } 129 130 void ShenandoahCodeRoots::add_nmethod(nmethod* nm) { 131 switch (ShenandoahCodeRootsStyle) { 132 case 0: 133 case 1: 134 break; 135 case 2: { 136 assert_locked_or_safepoint(CodeCache_lock); 137 ShenandoahLocker locker(CodeCache_lock->owned_by_self() ? 
NULL : &_recorded_nms_lock); 138 139 ShenandoahNMethodOopDetector detector; 140 nm->oops_do(&detector); 141 142 if (detector.has_oops()) { 143 ShenandoahNMethod* nmr = new ShenandoahNMethod(nm, detector.oops()); 144 nmr->assert_alive_and_correct(); 145 int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod); 146 if (idx != -1) { 147 ShenandoahNMethod* old = _recorded_nms->at(idx); 148 _recorded_nms->at_put(idx, nmr); 149 delete old; 150 } else { 151 _recorded_nms->append(nmr); 152 } 153 } 154 break; 155 } 156 default: 157 ShouldNotReachHere(); 158 } 159 }; 160 161 void ShenandoahCodeRoots::remove_nmethod(nmethod* nm) { 162 switch (ShenandoahCodeRootsStyle) { 163 case 0: 164 case 1: { 165 break; 166 } 167 case 2: { 168 assert_locked_or_safepoint(CodeCache_lock); 169 ShenandoahLocker locker(CodeCache_lock->owned_by_self() ? NULL : &_recorded_nms_lock); 170 171 ShenandoahNMethodOopDetector detector; 172 nm->oops_do(&detector, /* allow_zombie = */ true); 173 174 if (detector.has_oops()) { 175 int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod); 176 assert(idx != -1, "nmethod " PTR_FORMAT " should be registered", p2i(nm)); 177 ShenandoahNMethod* old = _recorded_nms->at(idx); 178 old->assert_same_oops(detector.oops()); 179 _recorded_nms->delete_at(idx); 180 delete old; 181 } 182 break; 183 } 184 default: 185 ShouldNotReachHere(); 186 } 187 } 188 189 ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() : 190 _heap(ShenandoahHeap::heap()), 191 _par_iterator(CodeCache::heaps()), 192 _claimed(0) { 193 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); 194 assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers"); 195 switch (ShenandoahCodeRootsStyle) { 196 case 0: 197 case 1: { 198 // No need to do anything here 199 break; 200 } 201 case 2: { 202 CodeCache_lock->lock_without_safepoint_check(); 203 break; 204 } 205 default: 206 ShouldNotReachHere(); 207 } 208 } 209 210 
// Releases the CodeCache_lock taken by the constructor in root style 2;
// styles 0 and 1 acquired nothing, so there is nothing to release.
ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      CodeCache_lock->unlock();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

// Dispatches a (possibly parallel) code-blob walk according to
// ShenandoahCodeRootsStyle:
//   0 - serial: exactly one thread (the winner of _seq_claimed) walks the
//       whole code cache; the CSET_FILTER flag is ignored.
//   1 - parallel stride-claiming walk over the raw code heaps (no filter).
//   2 - parallel walk over the recorded-nmethods table, optionally filtered
//       to nmethods that embed collection-set oops.
template<bool CSET_FILTER>
void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) {
  switch (ShenandoahCodeRootsStyle) {
    case 0: {
      if (_seq_claimed.try_set()) {
        CodeCache::blobs_do(f);
      }
      break;
    }
    case 1: {
      _par_iterator.parallel_blobs_do(f);
      break;
    }
    case 2: {
      ShenandoahCodeRootsIterator::fast_parallel_blobs_do<CSET_FILTER>(f);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

// Walks all code roots (no collection-set filtering).
void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<false>(f);
}

// Walks only code roots that embed collection-set oops (style 2 only honors
// the filter; other styles fall back to visiting everything).
void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<true>(f);
}

// Style-2 walk: worker threads carve the recorded-nmethods list into strides
// of 256 entries via an atomic fetch-and-add on _claimed, so each recorded
// nmethod is visited by exactly one thread.
template <bool CSET_FILTER>
void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  size_t stride = 256; // educated guess

  GrowableArray<ShenandoahNMethod*>* list = ShenandoahCodeRoots::_recorded_nms;

  size_t max = (size_t)list->length();
  while (_claimed < max) {
    // Atomic::add returns the post-add value; subtracting stride recovers the
    // start of the stride this thread just claimed.
    size_t cur = Atomic::add(stride, &_claimed) - stride;
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    // A racing thread may have pushed _claimed past max between the while
    // check and the add; in that case this thread claimed an empty stride.
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* nmr = list->at((int) idx);
      nmr->assert_alive_and_correct();

      if (CSET_FILTER && !nmr->has_cset_oops(_heap)) {
        continue;
      }

      f->do_code_blob(nmr->nm());
    }
  }
}

ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>* oops) { 285 _nm = nm; 286 _oops = NEW_C_HEAP_ARRAY(oop*, oops->length(), mtGC); 287 _oops_count = oops->length(); 288 for (int c = 0; c < _oops_count; c++) { 289 _oops[c] = oops->at(c); 290 } 291 } 292 293 ShenandoahNMethod::~ShenandoahNMethod() { 294 if (_oops != NULL) { 295 FREE_C_HEAP_ARRAY(oop*, _oops); 296 } 297 } 298 299 bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) { 300 for (int c = 0; c < _oops_count; c++) { 301 oop o = RawAccess<>::oop_load(_oops[c]); 302 if (heap->in_collection_set(o)) { 303 return true; 304 } 305 } 306 return false; 307 } 308 309 #ifdef ASSERT 310 void ShenandoahNMethod::assert_alive_and_correct() { 311 assert(_nm->is_alive(), "only alive nmethods here"); 312 assert(_oops_count > 0, "should have filtered nmethods without oops before"); 313 ShenandoahHeap* heap = ShenandoahHeap::heap(); 314 for (int c = 0; c < _oops_count; c++) { 315 oop *loc = _oops[c]; 316 assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*"); 317 oop o = RawAccess<>::oop_load(loc); 318 shenandoah_assert_correct_except(loc, o, 319 o == NULL || 320 heap->is_full_gc_move_in_progress() || 321 (VMThread::vm_operation() != NULL) && (VMThread::vm_operation()->type() == VM_Operation::VMOp_HeapWalkOperation) 322 ); 323 } 324 } 325 326 void ShenandoahNMethod::assert_same_oops(GrowableArray<oop*>* oops) { 327 assert(_oops_count == oops->length(), "should have the same number of oop*"); 328 for (int c = 0; c < _oops_count; c++) { 329 assert(_oops[c] == oops->at(c), "should be the same oop*"); 330 } 331 } 332 #endif