/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER2
#include "opto/matcher.hpp"
#endif

// -----------------------------------------------------------------------------------------
// Implementation of VtableStub

address VtableStub::_chunk             = NULL;
address VtableStub::_chunk_end         = NULL;
VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();

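// Stub memory is claimed from a shared "vtable chunks" VtableBlob rather than
// allocated one stub at a time; a fresh chunk, sized for chunk_factor stubs
// plus alignment slack, is requested from the code cache only when the current
// chunk cannot hold another stub of the requested code_size.
//
// Rough size arithmetic for one allocation (illustrative numbers only; the
// real sizeof(VtableStub) and wordSize are platform dependent):
//   code_size = 64, sizeof(VtableStub) = 16, wordSize = 8
//   real_size = align_up(64 + 16, 8) = 80 bytes for this stub
//   bytes     = 32 * 80 + pd_code_alignment() when a new chunk is carved out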
void* VtableStub::operator new(size_t size, int code_size) throw() {
  assert(size == sizeof(VtableStub), "mismatched size");
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = align_up(code_size + (int)sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    const int bytes = chunk_factor * real_size + pd_code_alignment();

    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
    // If changing the name, update the other file accordingly.
    VtableBlob* blob = VtableBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      return NULL;
    }
    _chunk = blob->content_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}

void VtableStub::print_on(outputStream* st) const {
  st->print("vtable stub (index = %d, receiver_location = " INTX_FORMAT ", code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
             index(), p2i(receiver_location()), p2i(code_begin()), p2i(code_end()));
}


// -----------------------------------------------------------------------------------------
// Implementation of VtableStubs
//
// For each hash value there's a linked list of vtable stubs (with that
// hash value). Each list is anchored in a little hash _table, indexed
// by that hash value.

VtableStub* VtableStubs::_table[VtableStubs::N];
int VtableStubs::_number_of_vtable_stubs = 0;
int VtableStubs::_vtab_stub_size = 0;
int VtableStubs::_itab_stub_size = 0;

#if defined(PRODUCT)
  // These values are good for the PRODUCT case (no tracing).
  static const int first_vtableStub_size =  64;
  static const int first_itableStub_size = 256;
#else
  // These values are good for the non-PRODUCT case (when tracing can be switched on).
  // To find out, run test workload with
  //   -Xlog:vtablestubs=Trace -XX:+CountCompiledCalls -XX:+DebugVtables
  // and use the reported "estimate" value.
  // Here is a list of observed worst-case values:
  //               vtable  itable
  // aarch64:         460     324
  // arm:               ?       ?
  // ppc (linux, BE): 404     288
  // ppc (linux, LE): 356     276
  // ppc (AIX):       416     296
  // s390x:           408     256
  // Solaris-sparc:   792     348
  // x86 (Linux):     670     309
  // x86 (MacOS):     682     321
  static const int first_vtableStub_size = 1024;
  static const int first_itableStub_size =  512;
#endif

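// One-time setup, called from vtableStubs_init() during VM startup: record the
// receiver location used by all stubs and clear the hash table buckets.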
void VtableStubs::initialize() {
  VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
  {
    MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
    assert(is_power_of_2(N), "N must be a power of 2");
    for (int i = 0; i < N; i++) {
      _table[i] = NULL;
    }
  }
}

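// Current buffer size to hand to the platform-specific stub generators:
// the measured estimate (_vtab_stub_size/_itab_stub_size) once a first stub
// has been generated, otherwise the pessimistic first_*Stub_size above.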
int VtableStubs::code_size_limit(bool is_vtable_stub) {
  if (is_vtable_stub) {
    return _vtab_stub_size > 0 ? _vtab_stub_size : first_vtableStub_size;
  } else { // itable stub
    return _itab_stub_size > 0 ? _itab_stub_size : first_itableStub_size;
  }
}   // code_size_limit

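// Called after a stub has been generated: guarantee that the emitted code fit
// into the buffer handed out by code_size_limit(), then grow the running size
// estimate to code_size + padding if the new stub exceeded it.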
void VtableStubs::check_and_set_size_limit(bool is_vtable_stub,
                                           int  code_size,
                                           int  padding) {
  const char* name = is_vtable_stub ? "vtable" : "itable";

  guarantee(code_size <= code_size_limit(is_vtable_stub),
            "buffer overflow in %s stub, code_size is %d, limit is %d", name, code_size, code_size_limit(is_vtable_stub));

  if (is_vtable_stub) {
    if (log_is_enabled(Trace, vtablestubs)) {
      if ( (_vtab_stub_size > 0) && ((code_size + padding) > _vtab_stub_size) ) {
        log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
                               name, _vtab_stub_size, code_size + padding);
      }
    }
    if ( (code_size + padding) > _vtab_stub_size ) {
      _vtab_stub_size = code_size + padding;
    }
  } else {  // itable stub
    if (log_is_enabled(Trace, vtablestubs)) {
      if ( (_itab_stub_size > 0) && ((code_size + padding) > _itab_stub_size) ) {
        log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
                               name, _itab_stub_size, code_size + padding);
      }
    }
    if ( (code_size + padding) > _itab_stub_size ) {
      _itab_stub_size = code_size + padding;
    }
  }
  return;
}   // check_and_set_size_limit

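// Common tail for the platform-specific stub generators: trace the freshly
// generated stub, guarantee that it did not overflow its buffer, check that
// enough spare space is left for index-dependent 32-bit offsets, update the
// size estimate via check_and_set_size_limit(), and record the NPE/AME
// exception points in the stub.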
void VtableStubs::bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
                              address npe_addr, address ame_addr,   bool is_vtable_stub,
                              int     index,    int     slop_bytes, int  index_dependent_slop) {
  const char* name        = is_vtable_stub ? "vtable" : "itable";
  const int   stub_length = code_size_limit(is_vtable_stub);

  if (log_is_enabled(Trace, vtablestubs)) {
    log_trace(vtablestubs)("%s #%d at " PTR_FORMAT ": size: %d, estimate: %d, slop area: %d",
                           name, index, p2i(s->code_begin()),
                           (int)(masm->pc() - s->code_begin()),
                           stub_length,
                           (int)(s->code_end() - masm->pc()));
  }
  guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
                                         name, index, stub_length,
                                         (int)(masm->pc() - s->code_begin()),
                                         (int)(masm->pc() - s->code_end()));
  assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
                                         name, index, index_dependent_slop,
                                         (int)(s->code_end() - masm->pc()));

  // After the first vtable/itable stub is generated, we have a much
  // better estimate for the stub size. Remember/update this
  // estimate after some sanity checks.
  check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
  s->set_exception_points(npe_addr, ame_addr);
}

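// Main entry point: return the entry address of a vtable/itable stub for the
// given vtable_index, generating and registering a new stub if no matching one
// exists (or if ShareVtableStubs is off). Returns NULL if the code cache has
// no room left. Callers typically go through the convenience wrappers
// (sketch, see vtableStubs.hpp):
//   address ventry = VtableStubs::find_vtable_stub(vtable_index);  // vtable dispatch
//   address ientry = VtableStubs::find_itable_stub(itable_index);  // itable dispatch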
address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
  assert(vtable_index >= 0, "must be non-negative");

  VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
  if (s == NULL) {
    if (is_vtable_stub) {
      s = create_vtable_stub(vtable_index);
    } else {
      s = create_itable_stub(vtable_index);
    }

    // Creation of vtable or itable can fail if there is not enough free space in the code cache.
    if (s == NULL) {
      return NULL;
    }

    enter(is_vtable_stub, vtable_index, s);
    if (PrintAdapterHandlers) {
      tty->print_cr("Decoding VtableStub %s[%d]@" INTX_FORMAT,
                    is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()));
      Disassembler::decode(s->code_begin(), s->code_end());
    }
    // Notify JVMTI about this stub. The event will be recorded by the enclosing
    // JvmtiDynamicCodeEventCollector and posted when this thread has released
    // all locks.
    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
                                                                   s->code_begin(), s->code_end());
    }
  }
  return s->entry_point();
}

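// Hash both the stub kind and the vtable index into one of the N buckets.
// Worked example (illustrative only, assuming receiver_location()->value() == 1
// and a table of N == 256 buckets, i.e. mask == N - 1 == 255):
//   vtable_index = 5:  hash = ((5 << 2) ^ 1) + 5 = 26
//   vtable stub:  ~26 & 255 = 229;   itable stub:  26 & 255 = 26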
inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index) {
  // Assumption: receiver_location < 4 in most cases.
  int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
  return (is_vtable_stub ? ~hash : hash) & mask;
}

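// Find an already-generated stub for (is_vtable_stub, vtable_index) in the
// hash table; returns NULL if none exists yet.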
VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
  MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
  VtableStub* s = _table[hash];
  while (s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
  return s;
}

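// Register a freshly generated stub: prepend it to its hash bucket and bump
// the stub counter.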
void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
  MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
  // enter s at the beginning of the corresponding list
  s->set_next(_table[h]);
  _table[h] = s;
  _number_of_vtable_stubs++;
}

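// Given the entry point address of a stub, recover the enclosing VtableStub
// by backing up over the fixed entry offset, then verify that this stub is
// actually registered in the hash table; returns NULL otherwise.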
VtableStub* VtableStubs::entry_point(address pc) {
  MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
  uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
  VtableStub* s;
  for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
  return (s == stub) ? s : NULL;
}

bool VtableStubs::contains(address pc) {
  // simple solution for now - we may want to use
  // a faster way if this function is called often
  return stub_containing(pc) != NULL;
}


VtableStub* VtableStubs::stub_containing(address pc) {
  // Note: No locking needed since any change to the data structure
  //       happens with an atomic store into it (we don't care about
  //       consistency with the _number_of_vtable_stubs counter).
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
      if (s->contains(pc)) return s;
    }
  }
  return NULL;
}

void vtableStubs_init() {
  VtableStubs::initialize();
}

void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
      f(s);
    }
  }
}


//-----------------------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

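// Diagnostic routine reachable from debug vtable stubs (the platform-specific
// stub generators emit a call to it when DebugVtables is on and the dispatch
// index looks out of range): print the receiver's klass and abort with a
// fatal error describing the bad dispatch.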
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
  ResourceMark rm;
  HandleMark hm;
  Klass* klass = receiver->klass();
  InstanceKlass* ik = InstanceKlass::cast(klass);
  klassVtable vt = ik->vtable();
  ik->print();
  fatal("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
        "index %d (vtable length %d)",
        p2i(receiver), index, vt.length());
}

#endif // PRODUCT