--- old/src/hotspot/share/gc/z/zNMethodTable.cpp	2019-02-20 21:51:23.967048574 +0100
+++ new/src/hotspot/share/gc/z/zNMethodTable.cpp	2019-02-20 21:51:23.439039579 +0100
@@ -31,8 +31,10 @@
 #include "gc/z/zHash.inline.hpp"
 #include "gc/z/zLock.inline.hpp"
 #include "gc/z/zNMethodAllocator.hpp"
+#include "gc/z/zNMethodClosure.hpp"
 #include "gc/z/zNMethodData.hpp"
 #include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNMethodTableIteration.hpp"
 #include "gc/z/zOopClosures.inline.hpp"
 #include "gc/z/zTask.hpp"
 #include "gc/z/zWorkers.hpp"
@@ -45,11 +47,9 @@
 
 ZNMethodTableEntry* ZNMethodTable::_table = NULL;
 size_t ZNMethodTable::_size = 0;
-ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;
-size_t ZNMethodTable::_iter_table_size = 0;
 size_t ZNMethodTable::_nregistered = 0;
 size_t ZNMethodTable::_nunregistered = 0;
-volatile size_t ZNMethodTable::_claimed = 0;
+ZNMethodTableIteration ZNMethodTable::_iteration;
 
 static ZNMethodData* gc_data(const nmethod* nm) {
   return nm->gc_data();
@@ -59,6 +59,15 @@
   return nm->set_gc_data(data);
 }
 
+ZNMethodTableEntry* ZNMethodTable::create(size_t size) {
+  void* const mem = ZNMethodAllocator::allocate(size * sizeof(ZNMethodTableEntry));
+  return ::new (mem) ZNMethodTableEntry[size];
+}
+
+void ZNMethodTable::destroy(ZNMethodTableEntry* table) {
+  ZNMethodAllocator::free(table);
+}
+
 void ZNMethodTable::attach_gc_data(nmethod* nm) {
   GrowableArray<oop*> immediate_oops;
   bool non_immediate_oops = false;
@@ -181,7 +190,7 @@
                          _nunregistered, percent_of(_nunregistered, _size), 0.0);
 
   // Allocate new table
-  ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];
+  ZNMethodTableEntry* const new_table = ZNMethodTable::create(new_size);
 
   // Transfer all registered entries
   for (size_t i = 0; i < _size; i++) {
@@ -191,10 +200,8 @@
     }
   }
 
-  if (_iter_table != _table) {
-    // Delete old table
-    delete [] _table;
-  }
+  // Free old table
+  ZNMethodTable::destroy(_table);
 
   // Install new table
   _table = new_table;
@@ -320,7 +327,7 @@
 
 void ZNMethodTable::wait_until_iteration_done() {
   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
-  while (_iter_table != NULL) {
+  while (_iteration.in_progress()) {
     CodeCache_lock->wait(Monitor::_no_safepoint_check_flag);
   }
 }
@@ -360,21 +367,14 @@
   ZNMethodAllocator::activate_deferred_frees();
 
   // Prepare iteration
-  _iter_table = _table;
-  _iter_table_size = _size;
-  _claimed = 0;
+  _iteration.nmethods_do_begin(_table, _size);
 }
 
 void ZNMethodTable::nmethods_do_end() {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
   // Finish iteration
-  if (_iter_table != _table) {
-    delete [] _iter_table;
-  }
-  _iter_table = NULL;
-
-  assert(_claimed >= _iter_table_size, "Failed to claim all table entries");
+  _iteration.nmethods_do_end();
 
   // Process deferred frees
   ZNMethodAllocator::deactivate_and_process_deferred_frees();
@@ -431,25 +431,7 @@
 }
 
 void ZNMethodTable::nmethods_do(ZNMethodClosure* cl) {
-  for (;;) {
-    // Claim table partition. Each partition is currently sized to span
-    // two cache lines. This number is just a guess, but seems to work well.
-    const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
-    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _iter_table_size);
-    const size_t partition_end = MIN2(partition_start + partition_size, _iter_table_size);
-    if (partition_start == partition_end) {
-      // End of table
-      break;
-    }
-
-    // Process table partition
-    for (size_t i = partition_start; i < partition_end; i++) {
-      const ZNMethodTableEntry entry = _iter_table[i];
-      if (entry.registered()) {
-        cl->do_nmethod(entry.method());
-      }
-    }
-  }
+  _iteration.nmethods_do(cl);
 }
 
 class ZNMethodTableUnlinkClosure : public ZNMethodClosure {
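Not included in this patch is the new gc/z/zNMethodTableIteration.hpp header that the iteration state moves into. The sketch below is a plausible reconstruction of that class, pieced together from the call sites above (in_progress(), nmethods_do_begin(), nmethods_do_end(), nmethods_do()) and from the partition-claiming loop the patch removes from ZNMethodTable::nmethods_do(). It is a sketch under those assumptions, not the actual contents of the new file; in particular the constructor, the assertions, and the include paths are guesses.

  #include "gc/z/zGlobals.hpp"                // ZCacheLineSize (assumed location)
  #include "gc/z/zNMethodClosure.hpp"
  #include "gc/z/zNMethodTableEntry.hpp"
  #include "runtime/atomic.hpp"
  #include "utilities/globalDefinitions.hpp"  // MIN2

  class ZNMethodTableIteration {
  private:
    ZNMethodTableEntry* _table;
    size_t              _size;
    volatile size_t     _claimed;

  public:
    ZNMethodTableIteration() :
        _table(NULL),
        _size(0),
        _claimed(0) {}

    // The table pointer doubles as the "iteration in progress" flag,
    // mirroring the old (_iter_table != NULL) test.
    bool in_progress() const {
      return _table != NULL;
    }

    void nmethods_do_begin(ZNMethodTableEntry* table, size_t size) {
      assert(!in_progress(), "Iteration already in progress");
      _table = table;
      _size = size;
      _claimed = 0;
    }

    void nmethods_do_end() {
      assert(_claimed >= _size, "Failed to claim all table entries");
      _table = NULL;
    }

    // Verbatim from the loop removed from ZNMethodTable::nmethods_do(),
    // with _iter_table/_iter_table_size renamed to _table/_size.
    void nmethods_do(ZNMethodClosure* cl) {
      for (;;) {
        // Claim table partition. Each partition is currently sized to span
        // two cache lines. This number is just a guess, but seems to work well.
        const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
        const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _size);
        const size_t partition_end = MIN2(partition_start + partition_size, _size);
        if (partition_start == partition_end) {
          // End of table
          break;
        }

        // Process table partition
        for (size_t i = partition_start; i < partition_end; i++) {
          const ZNMethodTableEntry entry = _table[i];
          if (entry.registered()) {
            cl->do_nmethod(entry.method());
          }
        }
      }
    }
  };

This also explains why the conditional deletes in rebuild() and nmethods_do_end() disappear: table memory now goes through ZNMethodTable::create()/destroy(), i.e. through ZNMethodAllocator, and the iteration is bracketed by activate_deferred_frees()/deactivate_and_process_deferred_frees(). A concurrent rebuild can therefore free the old table unconditionally, with the allocator deferring the actual free until no iteration is using it, which makes the old "_iter_table != _table" bookkeeping unnecessary.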