431 ^ ((unsigned int) method->size_of_parameters() << 6);
432 }
433
// Head of a lock-free stack of cache entries awaiting deferred deallocation;
// pushed by enqueue_for_cleanup() and drained by cleanup_old_entries().
OopMapCacheEntry* volatile OopMapCache::_old_entries = NULL;
435
436 OopMapCache::OopMapCache() {
437 _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry*, _size, mtClass);
438 for(int i = 0; i < _size; i++) _array[i] = NULL;
439 }
440
441
// Destroy the cache. Entries must be flushed (their out-of-line oop maps
// freed) before the slot array itself is released.
OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  FREE_C_HEAP_ARRAY(OopMapCacheEntry*, _array);
}
449
450 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
451 return (OopMapCacheEntry*)OrderAccess::load_ptr_acquire(&(_array[i % _size]));
452 }
453
454 bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
455 return Atomic::cmpxchg_ptr (entry, &_array[i % _size], old) == old;
456 }
457
458 void OopMapCache::flush() {
459 for (int i = 0; i < _size; i++) {
460 OopMapCacheEntry* entry = _array[i];
461 if (entry != NULL) {
462 _array[i] = NULL; // no barrier, only called in OopMapCache destructor
463 entry->flush();
464 FREE_C_HEAP_OBJ(entry);
465 }
466 }
467 }
468
469 void OopMapCache::flush_obsolete_entries() {
470 assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
471 for (int i = 0; i < _size; i++) {
472 OopMapCacheEntry* entry = _array[i];
473 if (entry != NULL && !entry->is_empty() && entry->method()->is_old()) {
474 // Cache entry is occupied by an old redefined method and we don't want
475 // to pin it down so flush the entry.
547
548 // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
549 // where the first entry in the collision array is replaced with the new one.
550 OopMapCacheEntry* old = entry_at(probe + 0);
551 if (put_at(probe + 0, tmp, old)) {
552 enqueue_for_cleanup(old);
553 } else {
554 enqueue_for_cleanup(tmp);
555 }
556
557 assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
558 return;
559 }
560
561 void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
562 bool success = false;
563 OopMapCacheEntry* head;
564 do {
565 head = _old_entries;
566 entry->_next = head;
567 success = Atomic::cmpxchg_ptr (entry, &_old_entries, head) == head;
568 } while (!success);
569
570 if (log_is_enabled(Debug, interpreter, oopmap)) {
571 ResourceMark rm;
572 log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
573 entry->method()->name_and_sig_as_C_string(), entry->bci());
574 }
575 }
576
577 // This is called after GC threads are done and nothing is accessing the old_entries
578 // list, so no synchronization needed.
579 void OopMapCache::cleanup_old_entries() {
580 OopMapCacheEntry* entry = _old_entries;
581 _old_entries = NULL;
582 while (entry != NULL) {
583 if (log_is_enabled(Debug, interpreter, oopmap)) {
584 ResourceMark rm;
585 log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
586 entry->method()->name_and_sig_as_C_string(), entry->bci());
587 }
|
431 ^ ((unsigned int) method->size_of_parameters() << 6);
432 }
433
// Head of a lock-free stack of cache entries awaiting deferred deallocation;
// pushed by enqueue_for_cleanup() and drained by cleanup_old_entries().
OopMapCacheEntry* volatile OopMapCache::_old_entries = NULL;
435
436 OopMapCache::OopMapCache() {
437 _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry*, _size, mtClass);
438 for(int i = 0; i < _size; i++) _array[i] = NULL;
439 }
440
441
// Destroy the cache. Entries must be flushed (their out-of-line oop maps
// freed) before the slot array itself is released.
OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  FREE_C_HEAP_ARRAY(OopMapCacheEntry*, _array);
}
449
450 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
451 return OrderAccess::load_acquire(&(_array[i % _size]));
452 }
453
454 bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
455 return Atomic::cmpxchg(entry, &_array[i % _size], old) == old;
456 }
457
458 void OopMapCache::flush() {
459 for (int i = 0; i < _size; i++) {
460 OopMapCacheEntry* entry = _array[i];
461 if (entry != NULL) {
462 _array[i] = NULL; // no barrier, only called in OopMapCache destructor
463 entry->flush();
464 FREE_C_HEAP_OBJ(entry);
465 }
466 }
467 }
468
469 void OopMapCache::flush_obsolete_entries() {
470 assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
471 for (int i = 0; i < _size; i++) {
472 OopMapCacheEntry* entry = _array[i];
473 if (entry != NULL && !entry->is_empty() && entry->method()->is_old()) {
474 // Cache entry is occupied by an old redefined method and we don't want
475 // to pin it down so flush the entry.
547
548 // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
549 // where the first entry in the collision array is replaced with the new one.
550 OopMapCacheEntry* old = entry_at(probe + 0);
551 if (put_at(probe + 0, tmp, old)) {
552 enqueue_for_cleanup(old);
553 } else {
554 enqueue_for_cleanup(tmp);
555 }
556
557 assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
558 return;
559 }
560
561 void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
562 bool success = false;
563 OopMapCacheEntry* head;
564 do {
565 head = _old_entries;
566 entry->_next = head;
567 success = Atomic::cmpxchg(entry, &_old_entries, head) == head;
568 } while (!success);
569
570 if (log_is_enabled(Debug, interpreter, oopmap)) {
571 ResourceMark rm;
572 log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
573 entry->method()->name_and_sig_as_C_string(), entry->bci());
574 }
575 }
576
577 // This is called after GC threads are done and nothing is accessing the old_entries
578 // list, so no synchronization needed.
579 void OopMapCache::cleanup_old_entries() {
580 OopMapCacheEntry* entry = _old_entries;
581 _old_entries = NULL;
582 while (entry != NULL) {
583 if (log_is_enabled(Debug, interpreter, oopmap)) {
584 ResourceMark rm;
585 log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
586 entry->method()->name_and_sig_as_C_string(), entry->bci());
587 }
|