/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER2
#include "opto/matcher.hpp"
#endif

// -----------------------------------------------------------------------------------------
// Implementation of VtableStub

address VtableStub::_chunk             = NULL;
address VtableStub::_chunk_end         = NULL;
VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();


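// Stubs are carved out of shared VtableBlob chunks rather than allocated
// individually; a new chunk is created on demand when the current one cannot
// hold the requested (word-aligned) stub size.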
void* VtableStub::operator new(size_t size, int code_size) throw() {
  assert_lock_strong(VtableStubs_lock);
  assert(size == sizeof(VtableStub), "mismatched size");
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = align_up(code_size + (int)sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    const int bytes = chunk_factor * real_size + pd_code_alignment();

    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
    // If changing the name, update the other file accordingly.
    VtableBlob* blob = VtableBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      return NULL;
    }
    _chunk = blob->content_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}


void VtableStub::print_on(outputStream* st) const {
  st->print("vtable stub (index = %d, receiver_location = " INTX_FORMAT ", code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
             index(), p2i(receiver_location()), p2i(code_begin()), p2i(code_end()));
}

void VtableStub::print() const { print_on(tty); }

// -----------------------------------------------------------------------------------------
// Implementation of VtableStubs
//
// For each hash value there's a linked list of vtable stubs (with that
// hash value). Each list is anchored in a little hash _table, indexed
// by that hash value.

VtableStub* VtableStubs::_table[VtableStubs::N];
int VtableStubs::_number_of_vtable_stubs = 0;
int VtableStubs::_vtab_stub_size = 0;
int VtableStubs::_itab_stub_size = 0;

#if defined(PRODUCT)
  // These values are good for the PRODUCT case (no tracing).
  static const int first_vtableStub_size =  64;
  static const int first_itableStub_size = 256;
#else
  // These values are good for the non-PRODUCT case (when tracing can be switched on).
  // To find out, run test workload with
  //   -Xlog:vtablestubs=Trace -XX:+CountCompiledCalls -XX:+DebugVtables
  // and use the reported "estimate" value.
  // Here is a list of observed worst-case values:
  //               vtable  itable
  // aarch64:         460     324
  // arm:               ?       ?
  // ppc (linux, BE): 404     288
  // ppc (linux, LE): 356     276
  // ppc (AIX):       416     296
  // s390x:           408     256
  // Solaris-sparc:   792     348
  // x86 (Linux):     670     309
  // x86 (MacOS):     682     321
  static const int first_vtableStub_size = 1024;
  static const int first_itableStub_size =  512;
#endif


void VtableStubs::initialize() {
  VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
  {
    MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
    assert(is_power_of_2(N), "N must be a power of 2");
    for (int i = 0; i < N; i++) {
      _table[i] = NULL;
    }
  }
}


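// Return the current code buffer size estimate for the requested stub kind.
// Before the first stub of a kind has been generated, the conservative
// first_vtableStub_size/first_itableStub_size constants above are used.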
int VtableStubs::code_size_limit(bool is_vtable_stub) {
  if (is_vtable_stub) {
    return _vtab_stub_size > 0 ? _vtab_stub_size : first_vtableStub_size;
  } else { // itable stub
    return _itab_stub_size > 0 ? _itab_stub_size : first_itableStub_size;
  }
}   // code_size_limit


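// Tighten the size estimate for subsequent stubs of the given kind, based on
// the code size of a stub that was just generated plus a safety padding.
// The guarantee catches stubs that overflowed the current size limit.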
void VtableStubs::check_and_set_size_limit(bool is_vtable_stub,
                                           int  code_size,
                                           int  padding) {
  const char* name = is_vtable_stub ? "vtable" : "itable";

  guarantee(code_size <= code_size_limit(is_vtable_stub),
            "buffer overflow in %s stub, code_size is %d, limit is %d", name, code_size, code_size_limit(is_vtable_stub));

  if (is_vtable_stub) {
    if (log_is_enabled(Trace, vtablestubs)) {
      if ( (_vtab_stub_size > 0) && ((code_size + padding) > _vtab_stub_size) ) {
        log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
                               name, _vtab_stub_size, code_size + padding);
      }
    }
    if ( (code_size + padding) > _vtab_stub_size ) {
      _vtab_stub_size = code_size + padding;
    }
  } else {  // itable stub
    if (log_is_enabled(Trace, vtablestubs)) {
      if ( (_itab_stub_size > 0) && ((code_size + padding) > _itab_stub_size) ) {
        log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
                               name, _itab_stub_size, code_size + padding);
      }
    }
    if ( (code_size + padding) > _itab_stub_size ) {
      _itab_stub_size = code_size + padding;
    }
  }
  return;
}   // check_and_set_size_limit


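// Common post-generation bookkeeping for the platform-specific stub generators:
// trace the generated stub, verify that it fits into its buffer (including the
// index-dependent slop area), update the size estimate, and record the
// exception points (npe_addr, ame_addr).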
void VtableStubs::bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
                              address npe_addr, address ame_addr,   bool is_vtable_stub,
                              int     index,    int     slop_bytes, int  index_dependent_slop) {
  const char* name        = is_vtable_stub ? "vtable" : "itable";
  const int   stub_length = code_size_limit(is_vtable_stub);

  if (log_is_enabled(Trace, vtablestubs)) {
    log_trace(vtablestubs)("%s #%d at " PTR_FORMAT ": size: %d, estimate: %d, slop area: %d",
                           name, index, p2i(s->code_begin()),
                           (int)(masm->pc() - s->code_begin()),
                           stub_length,
                           (int)(s->code_end() - masm->pc()));
  }
  guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
                                         name, index, stub_length,
                                         (int)(masm->pc() - s->code_begin()),
                                         (int)(masm->pc() - s->code_end()));
  assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
                                         name, index, index_dependent_slop,
                                         (int)(s->code_end() - masm->pc()));

  // After the first vtable/itable stub is generated, we have a much
  // better estimate for the stub size. Remember/update this
  // estimate after some sanity checks.
  check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
  s->set_exception_points(npe_addr, ame_addr);
}


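// Return the entry point of the stub for the given dispatch kind and index,
// creating and registering the stub under VtableStubs_lock if it does not
// exist yet. Returns NULL if stub creation fails (e.g. the code cache is full).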
address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
  assert(vtable_index >= 0, "must be non-negative");

  VtableStub* s;
  {
    MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
    if (s == NULL) {
      if (is_vtable_stub) {
        s = create_vtable_stub(vtable_index);
      } else {
        s = create_itable_stub(vtable_index);
      }

      // Creation of vtable or itable can fail if there is not enough free space in the code cache.
      if (s == NULL) {
        return NULL;
      }

      enter(is_vtable_stub, vtable_index, s);
      if (PrintAdapterHandlers) {
        tty->print_cr("Decoding VtableStub %s[%d]@" INTX_FORMAT,
                      is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()));
        Disassembler::decode(s->code_begin(), s->code_end());
      }
      // Notify JVMTI about this stub. The event will be recorded by the enclosing
      // JvmtiDynamicCodeEventCollector and posted when this thread has released
      // all locks.
      if (JvmtiExport::should_post_dynamic_code_generated()) {
        JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
                                                                     s->code_begin(), s->code_end());
      }
    }
  }
  return s->entry_point();
}


inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index) {
  // Assumption: receiver_location < 4 in most cases.
  int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
  return (is_vtable_stub ? ~hash : hash) & mask;
}


VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
  assert_lock_strong(VtableStubs_lock);
  unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
  VtableStub* s = _table[hash];
  while (s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
  return s;
}


void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
  assert_lock_strong(VtableStubs_lock);
  assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
  // enter s at the beginning of the corresponding list
  s->set_next(_table[h]);
  _table[h] = s;
  _number_of_vtable_stubs++;
}

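// Map a potential stub entry point back to its VtableStub. Returns NULL if pc
// is not the entry point of a registered stub.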
VtableStub* VtableStubs::entry_point(address pc) {
  MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
  uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
  VtableStub* s;
  for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
  return (s == stub) ? s : NULL;
}

bool VtableStubs::contains(address pc) {
  // simple solution for now - we may want to use
  // a faster way if this function is called often
  return stub_containing(pc) != NULL;
}


VtableStub* VtableStubs::stub_containing(address pc) {
  // Note: No locking needed since any change to the data structure
  //       happens with an atomic store into it (we don't care about
  //       consistency with the _number_of_vtable_stubs counter).
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
      if (s->contains(pc)) return s;
    }
  }
  return NULL;
}

void vtableStubs_init() {
  VtableStubs::initialize();
}

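// Apply f to every registered vtable/itable stub.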
void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
      f(s);
    }
  }
}


//-----------------------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

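// Diagnostic aid for debugging vtable dispatch: prints the receiver's klass
// and aborts with a fatal error when a compiled vtable dispatch uses a bad index.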
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
  ResourceMark rm;
  HandleMark hm;
  Klass* klass = receiver->klass();
  InstanceKlass* ik = InstanceKlass::cast(klass);
  klassVtable vt = ik->vtable();
  ik->print();
  fatal("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
        "index %d (vtable length %d)",
        p2i(receiver), index, vt.length());
}

#endif // PRODUCT