 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"

class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 protected:
  // Initialization
  void fill(const methodHandle& method, int bci);
  // fills the bit mask for native calls
  void fill_for_native(const methodHandle& method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

 private:
  void allocate_bit_mask();   // allocates the bit mask on the C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask if it was allocated on the C heap
  bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
#ifdef ASSERT
    _resource_allocate_bit_mask = false;
#endif
  }
};


// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;
  int               _bci;
  int               _stack_top;

  virtual bool report_results() const     { return false; }
  virtual bool possible_gc_point          (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog       (int nof_gc_points);
  virtual void fill_stackmap_epilog       ();
  virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
  // ... (remainder of this class and several following definitions elided in this excerpt) ...

    _mask = mask;
    _size = size;
    // initialize with 0
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  void generate() {
    NativeSignatureIterator::iterate();
  }
};
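The fragment above ends inside a signature-driven mask filler: it zero-initializes a word-packed mask and then runs a NativeSignatureIterator over the method's signature so each reference argument can set the bit for its slot. As a rough stand-alone illustration of that idea (a hypothetical helper, not HotSpot code), one can derive a one-bit-per-slot oop mask directly from a JVM method descriptor, where 'L...;' and array parameters occupy one reference slot and long/double occupy two non-reference slots:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Hypothetical stand-alone sketch: one bool per local slot, true = oop.
static std::vector<bool> oop_mask_for_signature(const std::string& sig,
                                                bool is_static) {
  std::vector<bool> mask;
  if (!is_static) mask.push_back(true);     // receiver ('this') is an oop
  assert(!sig.empty() && sig[0] == '(');
  for (std::size_t i = 1; i < sig.size() && sig[i] != ')'; i++) {
    switch (sig[i]) {
      case 'J': case 'D':                   // long/double: two non-oop slots
        mask.push_back(false); mask.push_back(false); break;
      case 'L':                             // object: skip to terminating ';'
        while (sig[i] != ';') i++;
        mask.push_back(true); break;
      case '[':                             // array: skip dims and element type
        while (sig[i] == '[') i++;
        if (sig[i] == 'L') while (sig[i] != ';') i++;
        mask.push_back(true); break;
      default:                              // B, C, F, I, S, Z: one non-oop slot
        mask.push_back(false); break;
    }
  }
  return mask;
}
// e.g. oop_mask_for_signature("(ILjava/lang/String;J)V", false)
// yields {true, false, true, false, false}.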
bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);

  for(int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i)              ? true : false;
    bool v2 = vars[i].is_reference() ? true : false;
    assert(v1 == v2, "locals oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
  }

  if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
  for(int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j)  ? true : false;
    bool v2 = stack[j].is_reference() ? true : false;
    assert(v1 == v2, "stack oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
  }
  if (TraceOopMapGeneration && Verbose) tty->cr();
  return true;
}

void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
  }
}

void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
    debug_only(_bit_mask[0] = 0;)
  }
}

// ... (intervening definitions elided; the excerpt resumes inside the mask-writing loop) ...

      cell = stack;
    }

    // set oop bit
    if ( cell->is_reference()) {
      value |= (mask << oop_bit_number );
    }

    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
}

void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}
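The tail of the mask writer above shows the cell encoding: each local or expression-stack cell contributes two adjacent bits to a packed word array, an oop bit and a dead bit. A minimal self-contained sketch of that packing follows; the constants are assumed for illustration (HotSpot defines the real layout in InterpreterOopMap), and entries never straddle a word boundary because two divides the word size:

#include <climits>
#include <cstdint>

static const int bits_per_entry  = 2;  // one oop bit + one dead bit per cell
static const int oop_bit_number  = 0;
static const int dead_bit_number = 1;
static const int BitsPerWord     = sizeof(uintptr_t) * CHAR_BIT;

// Set the two flag bits for one cell in a packed bit-mask array.
static void set_cell(uintptr_t* bit_mask, int cell, bool is_oop, bool is_dead) {
  int bit        = cell * bits_per_entry;          // first bit of this entry
  int word_index = bit / BitsPerWord;              // which word holds it
  uintptr_t mask = uintptr_t(1) << (bit % BitsPerWord);
  if (is_oop)  bit_mask[word_index] |= (mask << oop_bit_number);
  if (is_dead) bit_mask[word_index] |= (mask << dead_bit_number);
}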
// Implementation of OopMapCache

#ifndef PRODUCT

static long _total_memory_usage = 0;

long OopMapCache::memory_usage() {
  return _total_memory_usage;
}

#endif

void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
  assert(_resource_allocate_bit_mask,
    "Should not resource allocate the _bit_mask");

  set_method(from->method());
  set_bci(from->bci());
  set_mask_size(from->mask_size());
  set_expression_stack_size(from->expression_stack_size());

  // Is the bit mask contained in the entry?
  if (from->mask_size() <= small_mask_limit) {
    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
      mask_word_size() * BytesPerWord);
  } else {
    // The expectation is that this InterpreterOopMap is recently created
    // and empty. It is used to get a copy of a cached entry.
    // If the bit mask has a value, it should be in the
    // resource area.
    assert(_bit_mask[0] == 0 ||
      Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "The bit mask should have been allocated from a resource area");
    // Allocate the bit_mask from a Resource area for performance. Allocating
    // from the C heap as is done for OopMapCache has a significant
    // performance impact.
    _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
    assert(_bit_mask[0] != 0, "bit mask was not allocated");
    memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
      mask_word_size() * BytesPerWord);
  }
}

inline unsigned int OopMapCache::hash_value_for(const methodHandle& method, int bci) const {
  // We use method->code_size() rather than method->identity_hash() below since
  // the mark may not be present if a pointer to the method is already reversed.
  return   ((unsigned int) bci)
         ^ ((unsigned int) method->max_locals()         << 2)
         ^ ((unsigned int) method->code_size()          << 4)
         ^ ((unsigned int) method->size_of_parameters() << 6);
}


OopMapCache::OopMapCache() :
  _mut(Mutex::leaf, "An OopMapCache lock", true)
{
  _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass);
  // Cannot call flush for initialization, since flush
  // will check if memory should be deallocated
  for(int i = 0; i < _size; i++) _array[i].initialize();
  NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
}


OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
}

OopMapCacheEntry* OopMapCache::entry_at(int i) const {
  return &_array[i % _size];
}

void OopMapCache::flush() {
  for (int i = 0; i < _size; i++) _array[i].flush();
}

void OopMapCache::flush_obsolete_entries() {
  for (int i = 0; i < _size; i++)
    if (!_array[i].is_empty() && _array[i].method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't want
      // to pin it down so flush the entry.
      if (log_is_enabled(Debug, redefine, class, oopmap)) {
        ResourceMark rm;
        log_debug(redefine, class, oopmap)
          ("flush: %s(%s): cached entry @%d",
           _array[i].method()->name()->as_C_string(), _array[i].method()->signature()->as_C_string(), i);
      }
      _array[i].flush();
    }
}

void OopMapCache::lookup(const methodHandle& method,
                         int bci,
                         InterpreterOopMap* entry_for) const {
  MutexLocker x(&_mut);

  OopMapCacheEntry* entry = NULL;
  int probe = hash_value_for(method, bci);

  // Search hashtable for match
  int i;
  for(i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry->match(method, bci)) {
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    static int count = 0;
    ResourceMark rm;
    tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
    method->print_value(); tty->cr();
  }

  // Entry is not in hashtable.
  // Compute entry and return it

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this Method*
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in Method*.cpp.
    compute_one_oop_map(method, bci, entry_for);
    return;
  }

  // First search for an empty slot
  for(i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry->is_empty()) {
      entry->fill(method, bci);
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print_cr("*** collision in oopmap cache - flushing item ***");
  }

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  //entry_at(probe + _probe_depth - 1)->flush();
  //for(i = _probe_depth - 1; i > 0; i--) {
  //  // Copying entry[i] = entry[i-1];
  //  OopMapCacheEntry *to   = entry_at(probe + i);
  //  OopMapCacheEntry *from = entry_at(probe + i - 1);
  //  to->copy(from);
  //}

  assert(method->is_method(), "gaga");

  entry = entry_at(probe + 0);
  entry->fill(method, bci);

  // Copy the newly cached entry to input parameter
  entry_for->resource_copy(entry);

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print("Done with ");
    method->print_value(); tty->cr();
  }
  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");

  return;
}
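For context, here is a hedged sketch of how a caller typically consumes lookup(): the destination InterpreterOopMap is resource-allocated, so the call must sit inside a ResourceMark scope, which is exactly what resource_copy() above asserts. The function name is hypothetical; the iteration comment is modeled on VerifyClosure and iterate_oop() from this file:

// Hypothetical caller (sketch): fetch the oop map for one bci and walk it.
void example_oops_at(OopMapCache* cache, const methodHandle& method, int bci) {
  ResourceMark rm;          // resource_copy() may allocate in this arena
  InterpreterOopMap mask;   // created empty; resource-allocates its bit mask
  cache->lookup(method, bci, &mask);
  // A real caller would now iterate the mask, e.g. with an OffsetClosure
  // subclass passed to iterate_oop(), visiting each slot whose oop bit is set.
}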
void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
  // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
  OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  entry->resource_copy(tmp);
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp);
}

// ---------------------------------------------------------------------------
// Revised version of the same file follows (the cache is now lock-free).
// ---------------------------------------------------------------------------
/* ... (GPLv2 license header, identical to the one above) ... */

#include "precompiled.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"

class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 private:
  OopMapCacheEntry* _next;

 protected:
  // Initialization
  void fill(const methodHandle& method, int bci);
  // fills the bit mask for native calls
  void fill_for_native(const methodHandle& method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

 private:
  void allocate_bit_mask();   // allocates the bit mask on the C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask if it was allocated on the C heap
  bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
    _next = NULL;
#ifdef ASSERT
    _resource_allocate_bit_mask = false;
#endif
  }
};


// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;
  int               _bci;
  int               _stack_top;

  virtual bool report_results() const     { return false; }
  virtual bool possible_gc_point          (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog       (int nof_gc_points);
  virtual void fill_stackmap_epilog       ();
  virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
  // ... (remainder of this class and several following definitions elided in this excerpt) ...

    _mask = mask;
    _size = size;
    // initialize with 0
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  void generate() {
    NativeSignatureIterator::iterate();
  }
};

bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  Log(interpreter, oopmap) logv;
  LogStream st(logv.trace());

  st.print("Locals (%d): ", max_locals);
  for(int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i)              ? true : false;
    bool v2 = vars[i].is_reference() ? true : false;
    assert(v1 == v2, "locals oop mask generation error");
    st.print("%d", v1 ? 1 : 0);
  }
  st.cr();
  st.print("Stack (%d): ", stack_top);
  for(int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j)  ? true : false;
    bool v2 = stack[j].is_reference() ? true : false;
    assert(v1 == v2, "stack oop mask generation error");
    st.print("%d", v1 ? 1 : 0);
  }
  st.cr();
  return true;
}

void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
  }
}

void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
    debug_only(_bit_mask[0] = 0;)
  }
}

// ... (intervening definitions elided; the excerpt resumes inside the mask-writing loop) ...

      cell = stack;
    }

    // set oop bit
    if ( cell->is_reference()) {
      value |= (mask << oop_bit_number );
    }

    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
}

void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}


// Implementation of OopMapCache

void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
  assert(_resource_allocate_bit_mask,
    "Should not resource allocate the _bit_mask");

  set_method(from->method());
  set_bci(from->bci());
  set_mask_size(from->mask_size());
  set_expression_stack_size(from->expression_stack_size());

  // Is the bit mask contained in the entry?
  if (from->mask_size() <= small_mask_limit) {
    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
      mask_word_size() * BytesPerWord);
  } else {
    // The expectation is that this InterpreterOopMap is recently created
    // and empty. It is used to get a copy of a cached entry.
    // If the bit mask has a value, it should be in the
    // resource area.
    assert(_bit_mask[0] == 0 ||
      Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "The bit mask should have been allocated from a resource area");
    // Allocate the bit_mask from a Resource area for performance. Allocating
    // from the C heap as is done for OopMapCache has a significant
    // performance impact.
    _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
    assert(_bit_mask[0] != 0, "bit mask was not allocated");
    memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
      mask_word_size() * BytesPerWord);
  }
}
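The branch above is the small-mask fast path: a mask of at most small_mask_limit bits lives inline in the _bit_mask array itself, while a larger mask is stored out of line and _bit_mask[0] is reinterpreted as a pointer to it, which is why the code casts _bit_mask[0] back and forth. A self-contained sketch of that dual-use layout follows; N is an assumed inline word count for illustration, not HotSpot's actual constant:

#include <cstdint>

// Illustrative layout only: inline bits for small masks, pointer for large.
struct TinyMask {
  static const int N = 4;                                // assumed inline words
  static const int small_mask_limit = N * sizeof(uintptr_t) * 8;
  int       _mask_size;                                  // in bits
  uintptr_t _bit_mask[N];                                // bits or a pointer

  uintptr_t* bit_mask() {
    return _mask_size <= small_mask_limit
         ? _bit_mask                                     // inline storage
         : reinterpret_cast<uintptr_t*>(_bit_mask[0]);   // out-of-line storage
  }
};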
inline unsigned int OopMapCache::hash_value_for(const methodHandle& method, int bci) const {
  // We use method->code_size() rather than method->identity_hash() below since
  // the mark may not be present if a pointer to the method is already reversed.
  return   ((unsigned int) bci)
         ^ ((unsigned int) method->max_locals()         << 2)
         ^ ((unsigned int) method->code_size()          << 4)
         ^ ((unsigned int) method->size_of_parameters() << 6);
}

OopMapCacheEntry* volatile OopMapCache::_old_entries = NULL;

OopMapCache::OopMapCache() {
  _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry*, _size, mtClass);
  for(int i = 0; i < _size; i++) _array[i] = NULL;
}


OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  FREE_C_HEAP_ARRAY(OopMapCacheEntry*, _array);
}

OopMapCacheEntry* OopMapCache::entry_at(int i) const {
  return (OopMapCacheEntry*)OrderAccess::load_ptr_acquire(&(_array[i % _size]));
}

bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
  return Atomic::cmpxchg_ptr (entry, &_array[i % _size], old) == old;
}
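entry_at() and put_at() above are the heart of the lock-free protocol: readers acquire-load a slot, and writers publish a fully built entry with a single compare-and-swap, so a concurrent reader never observes a half-filled entry. A stand-in sketch of the same pattern in portable C++11 atomics (HotSpot itself uses the OrderAccess/Atomic primitives shown above; the names here are illustrative):

#include <atomic>

struct Slot { std::atomic<void*> ptr{nullptr}; };

inline void* slot_get(Slot& s) {
  // acquire: pairs with the release CAS so the entry's contents are visible
  return s.ptr.load(std::memory_order_acquire);
}

inline bool slot_put(Slot& s, void* entry, void* expected) {
  // release: all stores that filled 'entry' happen-before a successful publish
  return s.ptr.compare_exchange_strong(expected, entry,
                                       std::memory_order_release,
                                       std::memory_order_relaxed);
}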
void OopMapCache::flush() {
  for (int i = 0; i < _size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != NULL) {
      _array[i] = NULL;  // no barrier, only called in OopMapCache destructor
      entry->flush();
      FREE_C_HEAP_OBJ(entry);
    }
  }
}

void OopMapCache::flush_obsolete_entries() {
  assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
  for (int i = 0; i < _size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != NULL && !entry->is_empty() && entry->method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't want
      // to pin it down so flush the entry.
      if (log_is_enabled(Debug, redefine, class, oopmap)) {
        ResourceMark rm;
        log_debug(redefine, class, oopmap)
          ("flush: %s(%s): cached entry @%d",
           entry->method()->name()->as_C_string(), entry->method()->signature()->as_C_string(), i);
      }
      _array[i] = NULL;
      entry->flush();
      FREE_C_HEAP_OBJ(entry);
    }
  }
}

// Called by GC for thread root scan during a safepoint only. The other interpreted frame oopmaps
// are generated locally and not cached.
void OopMapCache::lookup(const methodHandle& method,
                         int bci,
                         InterpreterOopMap* entry_for) {
  assert(SafepointSynchronize::is_at_safepoint(), "called by GC in a safepoint");
  int probe = hash_value_for(method, bci);
  int i;
  OopMapCacheEntry* entry = NULL;

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    static int count = 0;
    ResourceMark rm;
    log_debug(interpreter, oopmap)
      ("%d - Computing oopmap at bci %d for %s at hash %d", ++count, bci,
       method()->name_and_sig_as_C_string(), probe);
  }

  // Search hashtable for match
  for(i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry != NULL && !entry->is_empty() && entry->match(method, bci)) {
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
      return;
    }
  }

  // Entry is not in hashtable.
  // Compute entry

  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  entry_for->resource_copy(tmp);

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this Method*
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in Method*.cpp.
    FREE_C_HEAP_OBJ(tmp);
    return;
  }

  // First search for an empty slot
  for(i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry == NULL) {
      if (put_at(probe + i, tmp, NULL)) {
        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
        return;
      }
    }
  }

  log_debug(interpreter, oopmap)("*** collision in oopmap cache - flushing item ***");

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  // where the first entry in the collision array is replaced with the new one.
  OopMapCacheEntry* old = entry_at(probe + 0);
  if (put_at(probe + 0, tmp, old)) {
    enqueue_for_cleanup(old);
  } else {
    enqueue_for_cleanup(tmp);
  }

  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
  return;
}

void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
  bool success = false;
  OopMapCacheEntry* head;
  do {
    head = _old_entries;
    entry->_next = head;
    success = Atomic::cmpxchg_ptr (entry, &_old_entries, head) == head;
  } while (!success);

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    ResourceMark rm;
    log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
      entry->method()->name_and_sig_as_C_string(), entry->bci());
  }
}

// This is called after GC threads are done and nothing is accessing the old_entries
// list, so no synchronization needed.
void OopMapCache::cleanup_old_entries() {
  OopMapCacheEntry* entry = _old_entries;
  _old_entries = NULL;
  while (entry != NULL) {
    if (log_is_enabled(Debug, interpreter, oopmap)) {
      ResourceMark rm;
      log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
        entry->method()->name_and_sig_as_C_string(), entry->bci());
    }
    OopMapCacheEntry* next = entry->_next;
    entry->flush();
    FREE_C_HEAP_OBJ(entry);
    entry = next;
  }
}

void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
  // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
  OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  entry->resource_copy(tmp);
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp);
}
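enqueue_for_cleanup() above is a classic lock-free stack push: link the new node to the observed head, then try to swing the head over with a CAS, retrying on contention. Because cleanup_old_entries() is the only consumer and runs after GC threads have quiesced, the ABA problem does not arise. A minimal sketch of the same push with standard atomics (hypothetical names, stand-ins for Atomic::cmpxchg_ptr on the _old_entries list):

#include <atomic>

struct Node { Node* _next; };

static std::atomic<Node*> _old_list{nullptr};

// Push 'entry' onto the lock-free cleanup list, retrying until the CAS wins.
void push_old(Node* entry) {
  Node* head = _old_list.load(std::memory_order_relaxed);
  do {
    entry->_next = head;       // link to the observed head before publishing
  } while (!_old_list.compare_exchange_weak(head, entry,
                                            std::memory_order_release,
                                            std::memory_order_relaxed));
  // compare_exchange_weak reloads 'head' on failure, so the loop re-links
  // against the latest head each iteration.
}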