< prev index next >

src/hotspot/share/gc/z/zNMethodTable.cpp

Print this page




 // Returns the list of immediate oops attached to this nmethod's GC data.
 // Uses an acquire load so a reader observes a fully-initialized
 // ZNMethodDataImmediateOops object published by swap_immediate_oops().
 136 ZNMethodDataImmediateOops* ZNMethodData::immediate_oops() const {
 137   return OrderAccess::load_acquire(&_immediate_oops);
 138 }
 139 
 140 ZNMethodDataImmediateOops* ZNMethodData::swap_immediate_oops(const GrowableArray<oop*>& immediate_oops) {
 141   ZNMethodDataImmediateOops* const data_immediate_oops =
 142     immediate_oops.is_empty() ? NULL : ZNMethodDataImmediateOops::create(immediate_oops);
 143   return Atomic::xchg(data_immediate_oops, &_immediate_oops);
 144 }
 145 
 // Convenience accessor for the ZGC-specific data block attached to an
 // nmethod (installed via set_gc_data() below).
 146 static ZNMethodData* gc_data(const nmethod* nm) {
 147   return nm->gc_data<ZNMethodData>();
 148 }
 149 
 150 static void set_gc_data(nmethod* nm, ZNMethodData* data) {
 151   return nm->set_gc_data<ZNMethodData>(data);
 152 }
 153 
 // Active hash table of nmethod entries, and its current capacity.
 154 ZNMethodTableEntry* ZNMethodTable::_table = NULL;
 155 size_t ZNMethodTable::_size = 0;
 // Lock guarding the iteration state (_iter_*) below.
 156 ZLock ZNMethodTable::_iter_lock;
 // Snapshot of the table used while an entry iteration is in progress;
 // NULL when no iteration is active (see safe_delete()).
 157 ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;
 158 size_t ZNMethodTable::_iter_table_size = 0;
 // Blocks whose deletion was deferred because an iteration was active.
 159 ZArray<void*> ZNMethodTable::_iter_deferred_deletes;
 // Counts of registered and unregistered entries currently in the table.
 160 size_t ZNMethodTable::_nregistered = 0;
 161 size_t ZNMethodTable::_nunregistered = 0;
 // Claim counter for iteration; presumably the next index range claimed by
 // parallel iterators (the "_claimed >= _iter_table_size" assert suggests
 // this) — TODO confirm against the iteration code not visible here.
 162 volatile size_t ZNMethodTable::_claimed = 0;
 163 
 164 void ZNMethodTable::safe_delete(void* data) {


 165   if (data == NULL) {
 166     return;
 167   }
 168 
 169   ZLocker<ZLock> locker(&_iter_lock);
 170   if (_iter_table != NULL) {
 171     // Iteration in progress, defer delete
 172     _iter_deferred_deletes.add(data);
 173   } else {
 174     // Iteration not in progress, delete now
 175     FREE_C_HEAP_ARRAY(uint8_t, data);
 176   }
 177 }
 178 
 179 ZNMethodTableEntry ZNMethodTable::create_entry(nmethod* nm) {
 180   GrowableArray<oop*> immediate_oops;
 181   bool non_immediate_oops = false;
 182 
 183   // Find all oops relocations
 184   RelocIterator iter(nm);
 185   while (iter.next()) {
 186     if (iter.type() != relocInfo::oop_type) {
 187       // Not an oop
 188       continue;
 189     }


 273 
 274   for (;;) {
 275     const ZNMethodTableEntry table_entry = table[index];
 276     assert(table_entry.registered() || table_entry.unregistered(), "Entry not found");
 277 
 278     if (table_entry.registered() && table_entry.method() == nm) {
 279       // Remove entry
 280       table[index] = ZNMethodTableEntry(true /* unregistered */);
 281 
 282       // Destroy GC data
 283       ZNMethodData::destroy(gc_data(nm));
 284       set_gc_data(nm, NULL);
 285       return;
 286     }
 287 
 288     index = next_index(index, size);
 289   }
 290 }
 291 
 292 void ZNMethodTable::rebuild(size_t new_size) {
 293   ZLocker<ZLock> locker(&_iter_lock);

 294   assert(is_power_of_2(new_size), "Invalid size");
 295 
 296   log_debug(gc, nmethod)("Rebuilding NMethod Table: "
 297                          SIZE_FORMAT "->" SIZE_FORMAT " entries, "
 298                          SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
 299                          SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
 300                          _size, new_size,
 301                          _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
 302                          _nunregistered, percent_of(_nunregistered, _size), 0.0);
 303 
 304   // Allocate new table
 305   ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];
 306 
 307   // Transfer all registered entries
 308   for (size_t i = 0; i < _size; i++) {
 309     const ZNMethodTableEntry entry = _table[i];
 310     if (entry.registered()) {
 311       register_entry(new_table, new_size, entry);
 312     }
 313   }


 458 
 459   ResourceMark rm;
 460 
 461   log_unregister(nm);
 462 
 463   // Remove entry
 464   unregister_entry(_table, _size, nm);
 465   _nunregistered++;
 466   _nregistered--;
 467 }
 468 
 469 void ZNMethodTable::disarm_nmethod(nmethod* nm) {
 470   BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
 471   if (bs != NULL) {
 472     bs->disarm(nm);
 473   }
 474 }
 475 
 476 void ZNMethodTable::nmethod_entries_do_begin() {
 477   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 478   ZLocker<ZLock> locker(&_iter_lock);
 479 
 480   // Prepare iteration
 481   _iter_table = _table;
 482   _iter_table_size = _size;
 483   _claimed = 0;
 484   assert(_iter_deferred_deletes.is_empty(), "Should be emtpy");
 485 }
 486 
 487 void ZNMethodTable::nmethod_entries_do_end() {
 488   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 489   ZLocker<ZLock> locker(&_iter_lock);
 490 
 491   // Finish iteration
 492   if (_iter_table != _table) {
 493     delete [] _iter_table;
 494   }
 495   _iter_table = NULL;
 496 
 497   assert(_claimed >= _iter_table_size, "Failed to claim all table entries");
 498 
 499   // Process deferred deletes
 500   ZArrayIterator<void*> iter(&_iter_deferred_deletes);
 501   for (void* data; iter.next(&data);) {
 502     FREE_C_HEAP_ARRAY(uint8_t, data);
 503   }
 504   _iter_deferred_deletes.clear();
 505 
 506   // Notify iteration done
 507   CodeCache_lock->notify_all();
 508 }
 509 




 // Returns the list of immediate oops attached to this nmethod's GC data.
 // Uses an acquire load so a reader observes a fully-initialized
 // ZNMethodDataImmediateOops object published by swap_immediate_oops().
 136 ZNMethodDataImmediateOops* ZNMethodData::immediate_oops() const {
 137   return OrderAccess::load_acquire(&_immediate_oops);
 138 }
 139 
 140 ZNMethodDataImmediateOops* ZNMethodData::swap_immediate_oops(const GrowableArray<oop*>& immediate_oops) {
 141   ZNMethodDataImmediateOops* const data_immediate_oops =
 142     immediate_oops.is_empty() ? NULL : ZNMethodDataImmediateOops::create(immediate_oops);
 143   return Atomic::xchg(data_immediate_oops, &_immediate_oops);
 144 }
 145 
 // Convenience accessor for the ZGC-specific data block attached to an
 // nmethod (installed via set_gc_data() below).
 146 static ZNMethodData* gc_data(const nmethod* nm) {
 147   return nm->gc_data<ZNMethodData>();
 148 }
 149 
 150 static void set_gc_data(nmethod* nm, ZNMethodData* data) {
 151   return nm->set_gc_data<ZNMethodData>(data);
 152 }
 153 
 // Active hash table of nmethod entries, and its current capacity.
 154 ZNMethodTableEntry* ZNMethodTable::_table = NULL;
 155 size_t ZNMethodTable::_size = 0;

 // Snapshot of the table used while an entry iteration is in progress;
 // NULL when no iteration is active (see safe_delete()).
 156 ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;
 157 size_t ZNMethodTable::_iter_table_size = 0;
 // Blocks whose deletion was deferred because an iteration was active.
 158 ZArray<void*> ZNMethodTable::_iter_deferred_deletes;
 // Counts of registered and unregistered entries currently in the table.
 159 size_t ZNMethodTable::_nregistered = 0;
 160 size_t ZNMethodTable::_nunregistered = 0;
 // Claim counter for iteration; presumably the next index range claimed by
 // parallel iterators (the "_claimed >= _iter_table_size" assert suggests
 // this) — TODO confirm against the iteration code not visible here.
 161 volatile size_t ZNMethodTable::_claimed = 0;
 162 
 163 void ZNMethodTable::safe_delete(void* data) {
 164   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 165 
 166   if (data == NULL) {
 167     return;
 168   }
 169 

 170   if (_iter_table != NULL) {
 171     // Iteration in progress, defer delete
 172     _iter_deferred_deletes.add(data);
 173   } else {
 174     // Iteration not in progress, delete now
 175     FREE_C_HEAP_ARRAY(uint8_t, data);
 176   }
 177 }
 178 
 179 ZNMethodTableEntry ZNMethodTable::create_entry(nmethod* nm) {
 180   GrowableArray<oop*> immediate_oops;
 181   bool non_immediate_oops = false;
 182 
 183   // Find all oops relocations
 184   RelocIterator iter(nm);
 185   while (iter.next()) {
 186     if (iter.type() != relocInfo::oop_type) {
 187       // Not an oop
 188       continue;
 189     }


 273 
 274   for (;;) {
 275     const ZNMethodTableEntry table_entry = table[index];
 276     assert(table_entry.registered() || table_entry.unregistered(), "Entry not found");
 277 
 278     if (table_entry.registered() && table_entry.method() == nm) {
 279       // Remove entry
 280       table[index] = ZNMethodTableEntry(true /* unregistered */);
 281 
 282       // Destroy GC data
 283       ZNMethodData::destroy(gc_data(nm));
 284       set_gc_data(nm, NULL);
 285       return;
 286     }
 287 
 288     index = next_index(index, size);
 289   }
 290 }
 291 
 292 void ZNMethodTable::rebuild(size_t new_size) {
 293   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 294 
 295   assert(is_power_of_2(new_size), "Invalid size");
 296 
 297   log_debug(gc, nmethod)("Rebuilding NMethod Table: "
 298                          SIZE_FORMAT "->" SIZE_FORMAT " entries, "
 299                          SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
 300                          SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
 301                          _size, new_size,
 302                          _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
 303                          _nunregistered, percent_of(_nunregistered, _size), 0.0);
 304 
 305   // Allocate new table
 306   ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];
 307 
 308   // Transfer all registered entries
 309   for (size_t i = 0; i < _size; i++) {
 310     const ZNMethodTableEntry entry = _table[i];
 311     if (entry.registered()) {
 312       register_entry(new_table, new_size, entry);
 313     }
 314   }


 459 
 460   ResourceMark rm;
 461 
 462   log_unregister(nm);
 463 
 464   // Remove entry
 465   unregister_entry(_table, _size, nm);
 466   _nunregistered++;
 467   _nregistered--;
 468 }
 469 
 470 void ZNMethodTable::disarm_nmethod(nmethod* nm) {
 471   BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
 472   if (bs != NULL) {
 473     bs->disarm(nm);
 474   }
 475 }
 476 
 477 void ZNMethodTable::nmethod_entries_do_begin() {
 478   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

 479 
 480   // Prepare iteration
 481   _iter_table = _table;
 482   _iter_table_size = _size;
 483   _claimed = 0;
 484   assert(_iter_deferred_deletes.is_empty(), "Should be emtpy");
 485 }
 486 
 487 void ZNMethodTable::nmethod_entries_do_end() {
 488   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

 489 
 490   // Finish iteration
 491   if (_iter_table != _table) {
 492     delete [] _iter_table;
 493   }
 494   _iter_table = NULL;
 495 
 496   assert(_claimed >= _iter_table_size, "Failed to claim all table entries");
 497 
 498   // Process deferred deletes
 499   ZArrayIterator<void*> iter(&_iter_deferred_deletes);
 500   for (void* data; iter.next(&data);) {
 501     FREE_C_HEAP_ARRAY(uint8_t, data);
 502   }
 503   _iter_deferred_deletes.clear();
 504 
 505   // Notify iteration done
 506   CodeCache_lock->notify_all();
 507 }
 508 


< prev index next >