422 // from the C heap as is done for OopMapCache has a significant
423 // performance impact.
424 _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
425 assert(_bit_mask[0] != 0, "bit mask was not allocated");
426 memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
427 mask_word_size() * BytesPerWord);
428 }
429 }
430
431 inline unsigned int OopMapCache::hash_value_for(const methodHandle& method, int bci) const {
432 // We use method->code_size() rather than method->identity_hash() below since
433 // the mark may not be present if a pointer to the method is already reversed.
434 return ((unsigned int) bci)
435 ^ ((unsigned int) method->max_locals() << 2)
436 ^ ((unsigned int) method->code_size() << 4)
437 ^ ((unsigned int) method->size_of_parameters() << 6);
438 }
439
440
441 OopMapCache::OopMapCache() :
442 _mut(Mutex::leaf, "An OopMapCache lock", true)
443 {
444 _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass);
445 // Cannot call flush for initialization, since flush
446 // will check if memory should be deallocated
447 for(int i = 0; i < _size; i++) _array[i].initialize();
448 NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
449 }
450
451
// Destructor: releases per-entry out-of-line oop-map storage, updates the
// non-product memory-usage counter, then frees the C-heap entry array.
// NOTE(review): statement order matters — flush() must run before the array
// is freed, since it walks the entries it is about to release.
OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
}
460
461 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
462 return &_array[i % _size];
467 }
468
469 void OopMapCache::flush_obsolete_entries() {
470 for (int i = 0; i < _size; i++)
471 if (!_array[i].is_empty() && _array[i].method()->is_old()) {
472 // Cache entry is occupied by an old redefined method and we don't want
473 // to pin it down so flush the entry.
474 if (log_is_enabled(Debug, redefine, class, oopmap)) {
475 ResourceMark rm;
476 log_debug(redefine, class, oopmap)
477 ("flush: %s(%s): cached entry @%d",
478 _array[i].method()->name()->as_C_string(), _array[i].method()->signature()->as_C_string(), i);
479 }
480 _array[i].flush();
481 }
482 }
483
484 void OopMapCache::lookup(const methodHandle& method,
485 int bci,
486 InterpreterOopMap* entry_for) const {
487 MutexLocker x(&_mut);
488
489 OopMapCacheEntry* entry = NULL;
490 int probe = hash_value_for(method, bci);
491
492 // Search hashtable for match
493 int i;
494 for(i = 0; i < _probe_depth; i++) {
495 entry = entry_at(probe + i);
496 if (entry->match(method, bci)) {
497 entry_for->resource_copy(entry);
498 assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
499 return;
500 }
501 }
502
503 if (TraceOopMapGeneration) {
504 static int count = 0;
505 ResourceMark rm;
506 tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
507 method->print_value(); tty->cr();
|
422 // from the C heap as is done for OopMapCache has a significant
423 // performance impact.
424 _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
425 assert(_bit_mask[0] != 0, "bit mask was not allocated");
426 memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
427 mask_word_size() * BytesPerWord);
428 }
429 }
430
431 inline unsigned int OopMapCache::hash_value_for(const methodHandle& method, int bci) const {
432 // We use method->code_size() rather than method->identity_hash() below since
433 // the mark may not be present if a pointer to the method is already reversed.
434 return ((unsigned int) bci)
435 ^ ((unsigned int) method->max_locals() << 2)
436 ^ ((unsigned int) method->code_size() << 4)
437 ^ ((unsigned int) method->size_of_parameters() << 6);
438 }
439
440
441 OopMapCache::OopMapCache() :
442 _mut(Mutex::leaf, "An OopMapCache lock", true, Monitor::_safepoint_check_never)
443 {
444 _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass);
445 // Cannot call flush for initialization, since flush
446 // will check if memory should be deallocated
447 for(int i = 0; i < _size; i++) _array[i].initialize();
448 NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
449 }
450
451
// Destructor: releases per-entry out-of-line oop-map storage, updates the
// non-product memory-usage counter, then frees the C-heap entry array.
// NOTE(review): statement order matters — flush() must run before the array
// is freed, since it walks the entries it is about to release.
OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
}
460
461 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
462 return &_array[i % _size];
467 }
468
469 void OopMapCache::flush_obsolete_entries() {
470 for (int i = 0; i < _size; i++)
471 if (!_array[i].is_empty() && _array[i].method()->is_old()) {
472 // Cache entry is occupied by an old redefined method and we don't want
473 // to pin it down so flush the entry.
474 if (log_is_enabled(Debug, redefine, class, oopmap)) {
475 ResourceMark rm;
476 log_debug(redefine, class, oopmap)
477 ("flush: %s(%s): cached entry @%d",
478 _array[i].method()->name()->as_C_string(), _array[i].method()->signature()->as_C_string(), i);
479 }
480 _array[i].flush();
481 }
482 }
483
484 void OopMapCache::lookup(const methodHandle& method,
485 int bci,
486 InterpreterOopMap* entry_for) const {
487 MutexLockerEx x(&_mut, Mutex::_no_safepoint_check_flag);
488
489 OopMapCacheEntry* entry = NULL;
490 int probe = hash_value_for(method, bci);
491
492 // Search hashtable for match
493 int i;
494 for(i = 0; i < _probe_depth; i++) {
495 entry = entry_at(probe + i);
496 if (entry->match(method, bci)) {
497 entry_for->resource_copy(entry);
498 assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
499 return;
500 }
501 }
502
503 if (TraceOopMapGeneration) {
504 static int count = 0;
505 ResourceMark rm;
506 tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
507 method->print_value(); tty->cr();
|