
src/hotspot/share/code/vtableStubs.cpp

--- old version ---

/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *


                          (int)(masm->pc() - s->code_begin()),
                          stub_length,
                          (int)(s->code_end() - masm->pc()));
  }
  guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
                                         name, index, stub_length,
                                         (int)(masm->pc() - s->code_begin()),
                                         (int)(masm->pc() - s->code_end()));
  assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
                                         name, index, index_dependent_slop,
                                         (int)(s->code_end() - masm->pc()));

  // After the first vtable/itable stub is generated, we have a much
  // better estimate for the stub size. Remember/update this
  // estimate after some sanity checks.
  check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
  s->set_exception_points(npe_addr, ame_addr);
}


address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
  assert(vtable_index >= 0, "must be non-negative");

  VtableStub* s;
  {
    MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
    if (s == NULL) {
      if (is_vtable_stub) {
        s = create_vtable_stub(vtable_index);
      } else {
        s = create_itable_stub(vtable_index);
      }

      // Creation of a vtable or itable stub can fail if there is not enough free space in the code cache.
      if (s == NULL) {
        return NULL;
      }

      enter(is_vtable_stub, vtable_index, s);
      if (PrintAdapterHandlers) {
        tty->print_cr("Decoding VtableStub %s[%d]@" INTX_FORMAT,
                      is_vtable_stub ? "vtbl" : "itbl", vtable_index, p2i(VtableStub::receiver_location()));
        Disassembler::decode(s->code_begin(), s->code_end());
      }
      // Notify JVMTI about this stub. The event will be recorded by the enclosing
      // JvmtiDynamicCodeEventCollector and posted when this thread has released
      // all locks.
      if (JvmtiExport::should_post_dynamic_code_generated()) {
        JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub ? "vtable stub" : "itable stub",
                                                                     s->code_begin(), s->code_end());
      }
    }
  }
  return s->entry_point();
}


inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index) {
  // Assumption: receiver_location < 4 in most cases.
  int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
  return (is_vtable_stub ? ~hash : hash) & mask;
}


VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
  assert_lock_strong(VtableStubs_lock);
  unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
  VtableStub* s = _table[hash];
  while (s != NULL && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
  return s;
}


void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
  assert_lock_strong(VtableStubs_lock);
  assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
  // enter s at the beginning of the corresponding list
  s->set_next(_table[h]);
  _table[h] = s;
  _number_of_vtable_stubs++;
}

VtableStub* VtableStubs::entry_point(address pc) {
  MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
  uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
  VtableStub* s;
  for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
  return (s == stub) ? s : NULL;
}

bool VtableStubs::contains(address pc) {
  // simple solution for now - we may want to use
  // a faster way if this function is called often
  return stub_containing(pc) != NULL;
}


VtableStub* VtableStubs::stub_containing(address pc) {
  // Note: No locking needed since any change to the data structure
  //       happens with an atomic store into it (we don't care about
  //       consistency with the _number_of_vtable_stubs counter).
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
      if (s->contains(pc)) return s;
    }


--- new version ---

/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *


                          (int)(masm->pc() - s->code_begin()),
                          stub_length,
                          (int)(s->code_end() - masm->pc()));
  }
  guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
                                         name, index, stub_length,
                                         (int)(masm->pc() - s->code_begin()),
                                         (int)(masm->pc() - s->code_end()));
  assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
                                         name, index, index_dependent_slop,
                                         (int)(s->code_end() - masm->pc()));

  // After the first vtable/itable stub is generated, we have a much
  // better estimate for the stub size. Remember/update this
  // estimate after some sanity checks.
  check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
  s->set_exception_points(npe_addr, ame_addr);
}
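
The check_and_set_size_limit() call above updates the shared size estimate, but
its body lies outside this hunk. A minimal sketch of the idea it implements,
assuming simple per-kind int fields (_vtable_stub_size and _itable_stub_size
are assumed names here), not the actual HotSpot implementation:

void VtableStubs::check_and_set_size_limit(bool is_vtable_stub, int code_size, int padding) {
  // Grow (never shrink) the per-kind estimate to the largest observed
  // code size, plus padding for index-dependent instruction growth.
  int* estimate = is_vtable_stub ? &_vtable_stub_size : &_itable_stub_size;
  if (code_size + padding > *estimate) {
    *estimate = code_size + padding;
  }
}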


address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  assert(vtable_index >= 0, "must be non-negative");

  VtableStub* s;
  {
    MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index, caller_is_c1) : NULL;
    if (s == NULL) {
      if (is_vtable_stub) {
        s = create_vtable_stub(vtable_index, caller_is_c1);
      } else {
        s = create_itable_stub(vtable_index, caller_is_c1);
      }

      // Creation of a vtable or itable stub can fail if there is not enough free space in the code cache.
      if (s == NULL) {
        return NULL;
      }

      enter(is_vtable_stub, vtable_index, caller_is_c1, s);
      if (PrintAdapterHandlers) {
        tty->print_cr("Decoding VtableStub (%s) %s[%d]@" INTX_FORMAT, caller_is_c1 ? "c1" : "full opt",
                      is_vtable_stub ? "vtbl" : "itbl", vtable_index, p2i(VtableStub::receiver_location()));
        Disassembler::decode(s->code_begin(), s->code_end());
      }
      // Notify JVMTI about this stub. The event will be recorded by the enclosing
      // JvmtiDynamicCodeEventCollector and posted when this thread has released
      // all locks.
      if (JvmtiExport::should_post_dynamic_code_generated()) {
        JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub ? "vtable stub" : "itable stub",  // FIXME: need to pass caller_is_c1??
                                                                     s->code_begin(), s->code_end());
      }
    }
  }
  return s->entry_point();
}
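
Call sites do not usually invoke find_stub() directly; vtableStubs.hpp declares
thin wrappers around it. Those wrappers are not part of this hunk, but with the
new flag they would presumably forward it unchanged, roughly:

static address find_vtable_stub(int vtable_index, bool caller_is_c1) { return find_stub(true,  vtable_index, caller_is_c1); }
static address find_itable_stub(int itable_index, bool caller_is_c1) { return find_stub(false, itable_index, caller_is_c1); }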


inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  // Assumption: receiver_location < 4 in most cases.
  int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
  if (caller_is_c1) {
    hash = 7 - hash;
  }
  return (is_vtable_stub ? ~hash : hash) & mask;
}
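
A worked example of the mixing above, as a standalone sketch (editor's
illustration, not part of the patch), assuming receiver_location()->value()
returns 0 and the table constants N = 256, mask = N - 1 from vtableStubs.hpp:

#include <cstdio>

int main() {
  const int mask = 255;                                  // N = 256 buckets
  int vtable_index = 5;
  int hash = ((vtable_index << 2) ^ 0) + vtable_index;   // 25
  printf("full-opt vtable bucket: %d\n", ~hash & mask);        // 230
  printf("c1 vtable bucket:       %d\n", ~(7 - hash) & mask);  // 17
  return 0;
}

The c1 perturbation merely spreads the two variants into different buckets;
correctness is still enforced by matches(), which compares caller_is_c1 as
well.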


VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  assert_lock_strong(VtableStubs_lock);
  unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
  VtableStub* s = _table[hash];
  while (s != NULL && !s->matches(is_vtable_stub, vtable_index, caller_is_c1)) s = s->next();
  return s;
}


void VtableStubs::enter(bool is_vtable_stub, int vtable_index, bool caller_is_c1, VtableStub* s) {
  assert_lock_strong(VtableStubs_lock);
  assert(s->matches(is_vtable_stub, vtable_index, caller_is_c1), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
  // enter s at the beginning of the corresponding list
  s->set_next(_table[h]);
  _table[h] = s;
  _number_of_vtable_stubs++;
}

VtableStub* VtableStubs::entry_point(address pc) {
  MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
  uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index(), stub->caller_is_c1());
  VtableStub* s;
  for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
  return (s == stub) ? s : NULL;
}
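
entry_point() leans on the layout of a VtableStub: the object header is
allocated immediately in front of its code, so the header address can be
recovered from a code entry address by subtracting a fixed offset. Roughly:

  [VtableStub header][ code ... ]
  ^                  ^
  stub               pc = stub + VtableStub::entry_offset()

The subsequent hash-chain walk confirms that the reconstructed pointer is a
stub the table actually owns before it is returned.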

bool VtableStubs::contains(address pc) {
  // simple solution for now - we may want to use
  // a faster way if this function is called often
  return stub_containing(pc) != NULL;
}


VtableStub* VtableStubs::stub_containing(address pc) {
  // Note: No locking needed since any change to the data structure
  //       happens with an atomic store into it (we don't care about
  //       consistency with the _number_of_vtable_stubs counter).
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
      if (s->contains(pc)) return s;
    }
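
The lock-free traversal above is safe because of the publication order in
enter(): a new stub's next pointer is set before the single store that makes
it the new list head, so a concurrent reader observes either the old head or
a fully linked node. Stubs are never unlinked from the table, which is what
makes this work without a read-side lock.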

