1 /*
  2  * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  */
 23 
 24 #include "precompiled.hpp"
 25 #include "code/compiledIC.hpp"
 26 #include "code/relocInfo.hpp"
 27 #include "code/nativeInst.hpp"
 28 #include "code/nmethod.hpp"
 29 #include "gc/z/zGlobals.hpp"
 30 #include "gc/z/zHash.inline.hpp"
 31 #include "gc/z/zLock.inline.hpp"
 32 #include "gc/z/zNMethodTable.hpp"
 33 #include "gc/z/zOopClosures.inline.hpp"
 34 #include "gc/z/zTask.hpp"
 35 #include "gc/z/zWorkers.hpp"
 36 #include "logging/log.hpp"
 37 #include "memory/allocation.inline.hpp"
 38 #include "memory/resourceArea.hpp"
 39 #include "oops/oop.inline.hpp"
 40 #include "runtime/atomic.hpp"
 41 #include "utilities/debug.hpp"
 42 #include "utilities/spinYield.hpp"
 43 
// An nmethod paired with the addresses of its immediate oops. Instances
// are variable-length: a trailing oop* array is co-allocated directly
// after the object (see create()/immediate_oops_begin()), so instances
// must only be created and destroyed through the static factory functions.
class ZNMethodWithImmediateOops {
private:
  nmethod* const _nm;              // The nmethod these immediate oops belong to
  const size_t   _nimmediate_oops; // Number of entries in the trailing oop* array

  // Size of this object, which is also the offset of the trailing oop* array
  static size_t header_size();

  ZNMethodWithImmediateOops(nmethod* nm, const GrowableArray<oop*>& immediate_oops);

public:
  // Allocates the object and its trailing oop* array in one C-heap block
  static ZNMethodWithImmediateOops* create(nmethod* nm, const GrowableArray<oop*>& immediate_oops);
  static void destroy(ZNMethodWithImmediateOops* nmi);

  nmethod* method() const;
  size_t immediate_oops_count() const;
  oop** immediate_oops_begin() const;
  // Like immediate_oops_begin(), but skips a first immediate oop that was
  // overwritten by the not-entrant jump patched into the entry point
  oop** immediate_oops_begin_safe() const;
  oop** immediate_oops_end() const;
};
 63 
 64 size_t ZNMethodWithImmediateOops::header_size() {
 65   const size_t size = sizeof(ZNMethodWithImmediateOops);
 66   assert(is_aligned(size, sizeof(oop*)), "Header misaligned");
 67   return size;
 68 }
 69 
// Constructor; only called via placement new from create(), which has
// already allocated room for the trailing oop* array after this object.
ZNMethodWithImmediateOops::ZNMethodWithImmediateOops(nmethod* nm, const GrowableArray<oop*>& immediate_oops) :
    _nm(nm),
    _nimmediate_oops(immediate_oops.length()) {
  // Save all immediate oops into the trailing oop* array
  for (size_t i = 0; i < _nimmediate_oops; i++) {
    immediate_oops_begin()[i] = immediate_oops.at(i);
  }
}
 78 
 79 ZNMethodWithImmediateOops* ZNMethodWithImmediateOops::create(nmethod* nm, const GrowableArray<oop*>& immediate_oops) {
 80   // Allocate memory for the ZNMethodWithImmediateOops object
 81   // plus the immediate oop* array that follows right after.
 82   const size_t size = header_size() + (sizeof(oop*) * immediate_oops.length());
 83   void* const method_with_immediate_oops = NEW_C_HEAP_ARRAY(uint8_t, size, mtGC);
 84   return ::new (method_with_immediate_oops) ZNMethodWithImmediateOops(nm, immediate_oops);
 85 }
 86 
void ZNMethodWithImmediateOops::destroy(ZNMethodWithImmediateOops* nmi) {
  // The object was placement-new'ed into a NEW_C_HEAP_ARRAY block by
  // create(); its members (a raw pointer and a size_t) need no destructor,
  // so freeing the raw block is sufficient
  FREE_C_HEAP_ARRAY(uint8_t, nmi);
}
 90 
// The nmethod these immediate oops were collected from
nmethod* ZNMethodWithImmediateOops::method() const {
  return _nm;
}
 94 
// Number of entries in the trailing oop* array
size_t ZNMethodWithImmediateOops::immediate_oops_count() const {
  return _nimmediate_oops;
}
 98 
 99 oop** ZNMethodWithImmediateOops::immediate_oops_begin() const {
100   // The immediate oop* array starts immediately after this object
101   return (oop**)((uintptr_t)this + header_size());
102 }
103 
oop** ZNMethodWithImmediateOops::immediate_oops_begin_safe() const {
  // Non-entrant nmethods have a jump instruction patched into the beginning
  // of the verified entry point, which could have overwritten an immediate
  // oop. If so, make sure we skip over that oop. Only the first immediate
  // oop is checked; presumably the array is ordered by address so later
  // oops cannot lie inside the patched jump -- TODO confirm.
  if (_nm->is_not_entrant()) {
    oop* const first_immediate_oop = *immediate_oops_begin();
    // First address past the patched jump instruction
    oop* const safe_begin = (oop*)(_nm->verified_entry_point() + NativeJump::instruction_size);
    if (first_immediate_oop < safe_begin) {
      // First immediate oop overwritten, skip it
      return immediate_oops_begin() + 1;
    }
  }

  // First immediate oop not overwritten
  return immediate_oops_begin();
}
120 
121 
122 oop** ZNMethodWithImmediateOops::immediate_oops_end() const {
123   return immediate_oops_begin() + immediate_oops_count();
124 }
125 
// Serializes table rebuilds against GC prologue/epilogue
ZLock ZNMethodTable::_rebuild_lock;
// Current hash table (open addressing, linear probing)
ZNMethodTableEntry* ZNMethodTable::_table = NULL;
// Snapshot of the table being scanned by the GC; NULL outside a cycle
ZNMethodTableEntry* ZNMethodTable::_scanned_table = NULL;
// Capacity of _table; always zero or a power of two
size_t ZNMethodTable::_size = 0;
// Capacity of _scanned_table
size_t ZNMethodTable::_scanned_table_size = 0;
// Number of registered (live) entries in _table
size_t ZNMethodTable::_nregistered = 0;
// Number of unregistered (tombstone) entries in _table
size_t ZNMethodTable::_nunregistered = 0;
// Parallel-iteration cursor into _scanned_table, claimed atomically
volatile size_t ZNMethodTable::_claimed = 0;
134 
135 ZNMethodTableEntry ZNMethodTable::create_entry(nmethod* nm) {
136   GrowableArray<oop*> immediate_oops;
137   bool non_immediate_oops = false;
138 
139   // Find all oops relocations
140   RelocIterator iter(nm);
141   while (iter.next()) {
142     if (iter.type() != relocInfo::oop_type) {
143       // Not an oop
144       continue;
145     }
146 
147     oop_Relocation* r = iter.oop_reloc();
148 
149     if (!r->oop_is_immediate()) {
150       // Non-immediate oop found
151       non_immediate_oops = true;
152       continue;
153     }
154 
155     if (r->oop_value() != NULL) {
156       // Non-NULL immediate oop found. NULL oops can safely be
157       // ignored since the method will be re-registered if they
158       // are later patched to be non-NULL.
159       immediate_oops.push(r->oop_addr());
160     }
161   }
162 
163   // oops_count() returns the number of oops in the oop table plus one
164   if (immediate_oops.is_empty() && nm->oops_count() == 1) {
165     // No oops found, but return any entry anyway
166     return ZNMethodTableEntry(nm, non_immediate_oops);
167   }
168 
169   if (immediate_oops.is_empty()) {
170     // No immediate oops found, return entry without immediate oops
171     return ZNMethodTableEntry(nm, non_immediate_oops);
172   }
173 
174   // Return entry with immediate oops
175   return ZNMethodTableEntry(ZNMethodWithImmediateOops::create(nm, immediate_oops), non_immediate_oops);
176 }
177 
178 void ZNMethodTable::destroy_entry(ZNMethodTableEntry entry) {
179   if (entry.immediate_oops()) {
180     ZNMethodWithImmediateOops::destroy(entry.method_with_immediate_oops());
181   }
182 }
183 
184 nmethod* ZNMethodTable::method(ZNMethodTableEntry entry) {
185   return entry.immediate_oops() ? entry.method_with_immediate_oops()->method() : entry.method();
186 }
187 
188 size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
189   assert(is_power_of_2(size), "Invalid size");
190   const size_t mask = size - 1;
191   const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
192   return hash & mask;
193 }
194 
195 size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
196   assert(is_power_of_2(size), "Invalid size");
197   const size_t mask = size - 1;
198   return (prev_index + 1) & mask;
199 }
200 
// Inserts entry into table (capacity size) using linear probing.
// Returns true if a new entry was inserted, false if an existing entry
// for the same nmethod was replaced (the replaced entry is destroyed).
// The probe loop terminates only by finding a free slot or a matching
// nmethod, so the table must never be completely full -- occupancy is
// kept below 70% by rebuild_if_needed().
bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, ZNMethodTableEntry entry) {
  const nmethod* const nm = method(entry);
  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];

    if (!table_entry.registered() && !table_entry.unregistered()) {
      // Free slot (neither live entry nor tombstone), insert new entry
      table[index] = entry;
      return true;
    }

    if (table_entry.registered() && method(table_entry) == nm) {
      // Same nmethod already present, replace existing entry
      destroy_entry(table_entry);
      table[index] = entry;
      return false;
    }

    index = next_index(index, size);
  }
}
224 
// Removes the entry for nm from table, replacing it with a tombstone
// ("unregistered" entry) so that probe chains through this slot stay
// intact. Returns true if an entry was removed, false if nm was not in
// the table (e.g. because it had no oops and was never registered).
bool ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, const nmethod* nm) {
  if (size == 0) {
    // Table is empty
    return false;
  }

  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];

    if (!table_entry.registered() && !table_entry.unregistered()) {
      // Free slot ends the probe chain, entry not found
      return false;
    }

    if (table_entry.registered() && method(table_entry) == nm) {
      // Remove entry, leaving a tombstone to keep probe chains intact
      destroy_entry(table_entry);
      table[index] = ZNMethodTableEntry(true /* unregistered */);
      return true;
    }

    index = next_index(index, size);
  }
}
251 
// Replaces _table with a freshly allocated table of new_size slots,
// re-inserting all registered entries and dropping all tombstones.
// Serialized against GC prologue/epilogue by _rebuild_lock.
void ZNMethodTable::rebuild(size_t new_size) {
  ZLocker m(&_rebuild_lock);
  assert(is_power_of_2(new_size), "Invalid size");

  log_debug(gc, nmethod)("Rebuilding NMethod Table: "
                         SIZE_FORMAT "->" SIZE_FORMAT " entries, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
                         _size, new_size,
                         _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
                         // New table holds no tombstones, hence always 0%
                         _nunregistered, percent_of(_nunregistered, _size), 0.0);

  // Allocate new table
  ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];

  // Transfer all registered entries; tombstones are not copied
  for (size_t i = 0; i < _size; i++) {
    const ZNMethodTableEntry entry = _table[i];
    if (entry.registered()) {
      register_entry(new_table, new_size, entry);
    }
  }

  if (_table != _scanned_table) {
    // Delete old table. If the GC is still scanning the old table,
    // gc_epilogue() deletes it instead once the scan is done.
    delete [] _table;
  }

  // Install new table
  _table = new_table;
  _size = new_size;
  _nunregistered = 0;
}
285 
void ZNMethodTable::rebuild_if_needed() {
  // The hash table uses linear probing. To avoid wasting memory while
  // at the same time maintaining good hash collision behavior we want
  // to keep the table occupancy between 30% and 70%. The table always
  // grows/shrinks by doubling/halving its size. Pruning of unregistered
  // entries is done by rebuilding the table with or without resizing it.
  const size_t min_size = 1024;
  const size_t shrink_threshold = _size * 0.30;
  const size_t prune_threshold = _size * 0.65;
  const size_t grow_threshold = _size * 0.70;

  if (_size == 0) {
    // Initialize table on first registration
    rebuild(min_size);
  } else if (_nregistered < shrink_threshold && _size > min_size) {
    // Shrink table
    rebuild(_size / 2);
  } else if (_nregistered + _nunregistered > grow_threshold) {
    // Live entries plus tombstones exceed 70%: prune or grow
    if (_nregistered < prune_threshold) {
      // Enough slack after dropping tombstones, prune at current size
      rebuild(_size);
    } else {
      // Grow table
      rebuild(_size * 2);
    }
  }
}
314 
// Trace-logs a registration: a one-line summary at (gc, nmethod) level,
// plus a dump of the oop table and any immediate oops at the more
// verbose (gc, nmethod, oops) level.
void ZNMethodTable::log_register(const nmethod* nm, ZNMethodTableEntry entry) {
  LogTarget(Trace, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
            "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name(),
            // oops_count() is the oop table size plus one
            nm->oops_count() - 1,
            entry.immediate_oops() ? entry.method_with_immediate_oops()->immediate_oops_count() : 0,
            BOOL_TO_STR(entry.non_immediate_oops()));

  LogTarget(Trace, gc, nmethod, oops) log_oops;
  if (!log_oops.is_enabled()) {
    return;
  }

  // Print nmethod oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
                   (p - begin), p2i(*p), (*p)->klass()->external_name());
  }

  if (entry.immediate_oops()) {
    // Print nmethod immediate oops (note: begin/end here shadow the
    // oop-table pointers above intentionally)
    const ZNMethodWithImmediateOops* const nmi = entry.method_with_immediate_oops();
    if (nmi != NULL) {
      oop** const begin = nmi->immediate_oops_begin();
      oop** const end = nmi->immediate_oops_end();
      for (oop** p = begin; p < end; p++) {
        log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
                       (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
      }
    }
  }
}
357 
358 void ZNMethodTable::log_unregister(const nmethod* nm) {
359   LogTarget(Debug, gc, nmethod) log;
360   if (!log.is_enabled()) {
361     return;
362   }
363 
364   log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
365             nm->method()->method_holder()->external_name(),
366             nm->method()->name()->as_C_string(),
367             p2i(nm));
368 }
369 
// Number of live (registered) entries currently in the table
size_t ZNMethodTable::registered_nmethods() {
  return _nregistered;
}
373 
// Number of tombstone (unregistered) entries currently in the table
size_t ZNMethodTable::unregistered_nmethods() {
  return _nunregistered;
}
377 
// Registers (or re-registers) an nmethod so its oops are visited by the
// GC. Methods without any oops are not inserted into the table.
void ZNMethodTable::register_nmethod(nmethod* nm) {
  ResourceMark rm;

  // Disarm the nmethod's GC barrier
  nm->disarm_barrier();

  // Create entry by scanning the nmethod's oop relocations
  const ZNMethodTableEntry entry = create_entry(nm);

  log_register(nm, entry);

  if (!entry.registered()) {
    // Method doesn't have any oops, ignore it
    return;
  }

  // Grow/Shrink/Prune table if needed
  rebuild_if_needed();

  // Insert new entry
  if (register_entry(_table, _size, entry)) {
    // New entry registered. When register_entry() instead returns
    // false the nmethod was already in the table so we do not want
    // to increase number of registered entries in that case.
    _nregistered++;
  }
}
404 
// Unregisters an nmethod, leaving a tombstone in the table. Safe to call
// both with and without the CodeCache_lock held.
void ZNMethodTable::unregister_nmethod(nmethod* nm) {
  ResourceMark rm;

  log_unregister(nm);

  if (CodeCache_lock->owned_by_self()) {
    SpinYield yield;
    // Entering from the sweeper; prevent it from deleting concurrently scanned nmethods.
    // gc_epilogue() clears _scanned_table while holding the CodeCache_lock,
    // so the lock is dropped while spinning to let the GC finish its scan.
    while (_scanned_table != NULL) {
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      yield.wait();
    }
  }

  // Remove entry
  if (unregister_entry(_table, _size, nm)) {
    // Entry was unregistered. When unregister_entry() instead returns
    // false the nmethod was not in the table (because it didn't have
    // any oops) so we do not want to decrease the number of registered
    // entries in that case.
    _nregistered--;
    _nunregistered++;
  }
}
429 
// Begins a GC scan cycle: snapshots the current table for parallel
// iteration and resets the claim cursor. Both the CodeCache_lock and
// the rebuild lock are held so the snapshot is consistent.
void ZNMethodTable::gc_prologue() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ZLocker m(&_rebuild_lock);
  _scanned_table = _table;
  _scanned_table_size = _size;
  _claimed = 0;
}
437 
// Ends a GC scan cycle. If rebuild() replaced the live table during the
// cycle, the scanned snapshot is deleted here (see rebuild()). Clearing
// _scanned_table also releases any sweeper spinning in
// unregister_nmethod().
void ZNMethodTable::gc_epilogue() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ZLocker m(&_rebuild_lock);
  if (_scanned_table != _table) {
    // Old table was replaced by rebuild() during the scan, delete it
    delete [] _scanned_table;
  }
  _scanned_table = NULL;
  // All partitions must have been claimed by nmethod_entries_do()
  assert(_claimed >= _scanned_table_size, "Failed to claim all table entries");
}
447 
448 void ZNMethodTable::entry_oops_do(ZNMethodTableEntry* entry, OopClosure* cl) {
449   entry_oops_do_no_fixup(entry, cl);
450   if (entry->non_immediate_oops()) {
451     // Process non-immediate oops
452     nmethod* const nm = method(*entry);
453     nm->fix_oop_relocations();
454   }
455 }
456 
// Applies cl to the oops of the entry's nmethod: first the oop table,
// then (if present) the side-allocated immediate oops. Non-immediate
// oop relocations are NOT fixed up here (see entry_oops_do()).
void ZNMethodTable::entry_oops_do_no_fixup(ZNMethodTableEntry* entry, OopClosure* cl) {
  nmethod* const nm = ZNMethodTable::method(*entry);
  if (!nm->is_alive()) {
    // No need to visit oops
    return;
  }

  // Process oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      // Skip placeholder non-oop words
      cl->do_oop(p);
    }
  }

  if (entry->immediate_oops()) {
    // Process immediate oops. Use the "safe" begin so that an oop slot
    // overwritten by a not-entrant jump patch is skipped.
    const ZNMethodWithImmediateOops* const nmi = entry->method_with_immediate_oops();
    if (nmi != NULL) {
      oop** const begin = nmi->immediate_oops_begin_safe();
      oop** const end = nmi->immediate_oops_end();
      for (oop** p = begin; p < end; p++) {
        cl->do_oop(*p);
      }
    }
  }
}
485 
486 class ZNMethodTableEntryToOopsDo : public ZNMethodTableEntryClosure {
487 private:
488   OopClosure* _cl;
489 
490 public:
491   ZNMethodTableEntryToOopsDo(OopClosure* cl) : _cl(cl)  {}
492 
493   void do_nmethod_entry(ZNMethodTableEntry* entry) {
494     ZNMethodTable::entry_oops_do(entry, _cl);
495   }
496 };
497 
498 void ZNMethodTable::oops_do(OopClosure* cl) {
499   ZNMethodTableEntryToOopsDo entry_cl(cl);
500   nmethod_entries_do(&entry_cl);
501 }
502 
// Applies cl to every registered entry in the scanned-table snapshot.
// Safe to call from multiple GC workers concurrently: each caller
// atomically claims fixed-size partitions of the table until the whole
// snapshot has been claimed (verified by gc_epilogue()).
void ZNMethodTable::nmethod_entries_do(ZNMethodTableEntryClosure* cl) {
  for (;;) {
    // Claim table partition. Each partition is currently sized to span
    // two cache lines. This number is just a guess, but seems to work well.
    const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
    // Atomic::add returns the updated value, so subtracting partition_size
    // yields the start of the partition this caller just claimed; both
    // bounds are clamped to the snapshot size
    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _scanned_table_size);
    const size_t partition_end = MIN2(partition_start + partition_size, _scanned_table_size);
    if (partition_start == partition_end) {
      // End of table
      break;
    }

    // Process table partition, skipping free slots and tombstones
    for (size_t i = partition_start; i < partition_end; i++) {
      ZNMethodTableEntry& entry = _scanned_table[i];
      if (entry.registered()) {
        cl->do_nmethod_entry(&entry);
      }
    }
  }
}
524 
// Per-entry closure used during concurrent class unloading: unlinks
// unloading nmethods and keeps the remaining ones' oops alive.
class ZNMethodTableUnloadClosure : public ZNMethodTableEntryClosure {
private:
  bool _unloading_occurred; // Whether class unloading occurred this GC cycle

public:
  // Note: the is_alive closure is accepted but not stored or used here;
  // presumably kept for interface compatibility -- TODO confirm.
  ZNMethodTableUnloadClosure(BoolObjectClosure* is_alive, bool unloading_occurred)
    : ZNMethodTableEntryClosure(),
      _unloading_occurred(unloading_occurred) {}

  virtual void do_nmethod_entry(ZNMethodTableEntry* entry) {
    nmethod* nm = ZNMethodTable::method(*entry);
    if (!nm->is_alive()) {
      // Already dead, nothing to do
      return;
    }

    if (nm->is_unloading()) {
      // Unlink the unloading nmethod
      nm->make_unloaded();
      return;
    }

    // Keep the nmethod's oops alive (without fixing oop relocations),
    // clean its inline/IC caches, and disarm its GC barrier
    ZPhantomKeepAliveOopClosure cl;
    ZNMethodTable::entry_oops_do_no_fixup(entry, &cl);
    ZNMethodBarrier::cleanup_nmethod_caches(nm, _unloading_occurred);
    nm->disarm_barrier();
  }
};
551 
// GC task running the unload closure over the nmethod table with
// multiple workers. The constructor/destructor bracket the parallel
// iteration with gc_prologue()/gc_epilogue(), which snapshot and
// release the scanned table.
class ZNMethodTableUnloadTask : public ZTask {
private:
  ZNMethodTableUnloadClosure _unlink_cl;

public:
  ZNMethodTableUnloadTask(BoolObjectClosure* cl, bool unloading_occurred)
    : ZTask("ZNMethodTableUnloadTask"),
      _unlink_cl(cl, unloading_occurred)
  {
    ZNMethodTable::gc_prologue();
  }

  ~ZNMethodTableUnloadTask() {
    ZNMethodTable::gc_epilogue();
  }

  // Executed by each worker; partitions are claimed atomically inside
  virtual void work() {
    ZNMethodTable::nmethod_entries_do(&_unlink_cl);
  }
};
572 
573 void ZNMethodTable::do_unloading(ZWorkers* workers, BoolObjectClosure* is_alive, bool unloading_occurred) {
574   ZNMethodTableUnloadTask task(is_alive, unloading_occurred);
575   workers->run_concurrent(&task);
576 }