
src/hotspot/share/code/vtableStubs.cpp

rev 51433 : 8207343: Automate vtable/itable stub size calculation
Reviewed-by: kvn


  73   void* res = _chunk;
  74   _chunk += real_size;
  75   align_chunk();
  76   return res;
  77 }
  78 
  79 
  80 void VtableStub::print_on(outputStream* st) const {
  81   st->print("vtable stub (index = %d, receiver_location = " INTX_FORMAT ", code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
  82              index(), p2i(receiver_location()), p2i(code_begin()), p2i(code_end()));
  83 }
  84 
  85 
  86 // -----------------------------------------------------------------------------------------
  87 // Implementation of VtableStubs
  88 //
  89 // For each hash value there's a linked list of vtable stubs (with that
  90 // hash value). Each list is anchored in a little hash _table, indexed
  91 // by that hash value.
  92 
  93 VtableStub* VtableStubs::_table[VtableStubs::N];
  94 int VtableStubs::_number_of_vtable_stubs = 0;
  95 
  96 
  97 void VtableStubs::initialize() {
  98   VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
  99   {
 100     MutexLocker ml(VtableStubs_lock);
 101     assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
 102     assert(is_power_of_2(N), "N must be a power of 2");
 103     for (int i = 0; i < N; i++) {
 104       _table[i] = NULL;
 105     }
 106   }
 107 }
 108 
 109 
 110 address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
 111   assert(vtable_index >= 0, "must be positive");
 112 
 113   VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
 114   if (s == NULL) {
 115     if (is_vtable_stub) {
 116       s = create_vtable_stub(vtable_index);
 117     } else {
 118       s = create_itable_stub(vtable_index);
 119     }
 120 
 121     // Creation of vtable or itable can fail if there is not enough free space in the code cache.
 122     if (s == NULL) {
 123       return NULL;
 124     }
 125 
 126     enter(is_vtable_stub, vtable_index, s);
 127     if (PrintAdapterHandlers) {
 128       tty->print_cr("Decoding VtableStub %s[%d]@" INTX_FORMAT,
 129                     is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()));


 156   return s->entry_point();
 157 }
 158 
 159 
 160 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
 161   MutexLocker ml(VtableStubs_lock);
 162   assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
 163   unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
 164   // enter s at the beginning of the corresponding list
 165   s->set_next(_table[h]);
 166   _table[h] = s;
 167   _number_of_vtable_stubs++;
 168 }
 169 
 170 VtableStub* VtableStubs::entry_point(address pc) {
 171   MutexLocker ml(VtableStubs_lock);
 172   VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
 173   uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
 174   VtableStub* s;
 175   for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
 176   if (s == stub) {
 177     return s;
 178   }
 179   return NULL;
 180 }
 181 
 182 bool VtableStubs::contains(address pc) {
 183   // simple solution for now - we may want to use
 184   // a faster way if this function is called often
 185   return stub_containing(pc) != NULL;
 186 }
 187 
 188 
 189 VtableStub* VtableStubs::stub_containing(address pc) {
 190   // Note: No locking needed since any change to the data structure
 191   //       happens with an atomic store into it (we don't care about
 192   //       consistency with the _number_of_vtable_stubs counter).
 193   for (int i = 0; i < N; i++) {
 194     for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
 195       if (s->contains(pc)) return s;
 196     }
 197   }
 198   return NULL;
 199 }




  73   void* res = _chunk;
  74   _chunk += real_size;
  75   align_chunk();
  76   return res;
  77 }
  78 
  79 
  80 void VtableStub::print_on(outputStream* st) const {
  81   st->print("vtable stub (index = %d, receiver_location = " INTX_FORMAT ", code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
  82              index(), p2i(receiver_location()), p2i(code_begin()), p2i(code_end()));
  83 }
  84 
  85 
  86 // -----------------------------------------------------------------------------------------
  87 // Implementation of VtableStubs
  88 //
  89 // For each hash value there's a linked list of vtable stubs (with that
  90 // hash value). Each list is anchored in a little hash _table, indexed
  91 // by that hash value.
  92 
  93 static int const firstStub_size = 1024;
  94 
  95 VtableStub* VtableStubs::_table[VtableStubs::N];
  96 int VtableStubs::_number_of_vtable_stubs = 0;
  97 int VtableStubs::_vtab_stub_size = 0;
  98 int VtableStubs::_itab_stub_size = 0;
  99 
 100 
 101 void VtableStubs::initialize() {
 102   VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
 103   {
 104     MutexLocker ml(VtableStubs_lock);
 105     assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
 106     assert(is_power_of_2(N), "N must be a power of 2");
 107     for (int i = 0; i < N; i++) {
 108       _table[i] = NULL;
 109     }
 110   }
 111 }
 112 
 113 
 114 int VtableStub::code_size_limit(bool is_vtable_stub) {
 115   if (is_vtable_stub) {
 116     return VtableStubs::_vtab_stub_size > 0 ? VtableStubs::_vtab_stub_size
 117                                             : firstStub_size;
 118   } else { // itable stub
 119     return VtableStubs::_itab_stub_size > 0 ? VtableStubs::_itab_stub_size
 120                                             : firstStub_size;
 121   }
 122 }   // code_size_limit
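code_size_limit() is what the platform-specific stub generators are expected to consult before opening a code buffer. As a hedged sketch only (the real generators live in the cpu-specific vtableStubs_<arch>.cpp files and add their own slop handling; the buffer setup below is an assumption, not part of this change), the allocation side of create_vtable_stub() might look like:

VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Ask for the current size estimate; this is firstStub_size (1024) until the
  // first stub has been generated and check_and_set_size_limit() has run.
  const int stub_code_length = VtableStub::code_size_limit(true);
  // Placement new carves the stub out of the stub chunk (see operator new above);
  // it returns NULL when the code cache has no room left.
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  if (s == NULL) {
    return NULL;
  }
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);
  // ... emit the receiver class load and the vtable dispatch (platform-specific),
  //     then finish via bookkeeping(), shown below ...
  return s;
}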
 123 
 124 
 125 void VtableStub::check_and_set_size_limit(bool is_vtable_stub,
 126                                           int  code_size,
 127                                           int  padding        ) {
 128   const char* name = is_vtable_stub ? "vtable" : "itable";
 129 
 130   guarantee(code_size <= code_size_limit(is_vtable_stub),
 131             "buffer overflow in %s stub, code_size is %d, limit is %d", name, code_size, code_size_limit(is_vtable_stub));
 132 
 133   if (is_vtable_stub) {
 134     if (log_is_enabled(Trace, vtablestubs)) {
 135       if ( (VtableStubs::_vtab_stub_size > 0) && ((code_size + padding) > VtableStubs::_vtab_stub_size) ) {
 136         log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
 137                                name, VtableStubs::_vtab_stub_size, code_size + padding);
 138       }
 139     }
 140     if ( (code_size + padding) > VtableStubs::_vtab_stub_size ) {
 141       VtableStubs::_vtab_stub_size = code_size + padding;
 142     }
 143   } else {  // itable stub
 144     if (log_is_enabled(Trace, vtablestubs)) {
 145       if ( (VtableStubs::_itab_stub_size > 0) && ((code_size + padding) > VtableStubs::_itab_stub_size) ) {
 146         log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
 147                                name, VtableStubs::_itab_stub_size, code_size + padding);
 148       }
 149     }
 150     if ( (code_size + padding) > VtableStubs::_itab_stub_size ) {
 151       VtableStubs::_itab_stub_size = code_size + padding;
 152     }
 153   }
 154   return;
 155 }   // check_and_set_size_limit
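The effect is that each stored estimate only ever grows: the new value is max(previous estimate, code_size + padding). As an illustration with made-up numbers, if the first itable stub assembles to 180 bytes and is passed 8 bytes of padding, _itab_stub_size becomes 188; a later 150-byte stub leaves it unchanged, while a 200-byte stub raises it to 208 and, with trace logging enabled for the vtablestubs tag, reports the adjustment.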
 156 
 157 
 158 void VtableStubs::bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
 159                               address npe_addr, address ame_addr,   bool is_vtable_stub,
 160                               int     index,    int     slop_bytes, int  slop32) {
 161   const char* name        = is_vtable_stub ? "vtable" : "itable";
 162   const int   stub_length = VtableStub::code_size_limit(is_vtable_stub);
 163 
 164   if (log_is_enabled(Trace, vtablestubs)) {
 165     log_trace(vtablestubs)("%s #%d at " PTR_FORMAT ": size: %d, estimate: %d, slop area: %d",
 166                            name, index, p2i(s->code_begin()),
 167                            (int)(masm->pc() - s->code_begin()),
 168                            stub_length,
 169                            (int)(s->code_end() - masm->pc()));
 170   }
 171   guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
 172                                          name, index, stub_length,
 173                                          (int)(masm->pc() - s->code_begin()),
 174                                          (int)(masm->pc() - s->code_end()));
 175   assert((masm->pc() + slop32) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
 176                                          name, index, slop32,
 177                                          (int)(s->code_end() - masm->pc()));
 178 
 179   // After the first vtable/itable stub is generated, we have a much
 180   // better estimate for the stub size. Remember/update this
 181   // estimate after some sanity checks.
 182   s->check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
 183   s->set_exception_points(npe_addr, ame_addr);
 184 }
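bookkeeping() is intended as the common tail of every platform generator: it checks the emitted size against the estimate, keeps the estimate up to date, and records the exception points. A hedged sketch of the call site at the end of a cpu-specific create_vtable_stub() (the slop values and the npe_addr/ame_addr handling are illustrative, not taken from this change):

  // ... dispatch code has been emitted into masm; npe_addr and ame_addr were
  //     recorded at the null-check and AbstractMethodError paths ...
  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true /* is_vtable_stub */,
              vtable_index, slop_bytes, 0 /* slop32: illustrative value */);
  return s;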
 185 
 186 
 187 address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
 188   assert(vtable_index >= 0, "must be positive");
 189 
 190   VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
 191   if (s == NULL) {
 192     if (is_vtable_stub) {
 193       s = create_vtable_stub(vtable_index);
 194     } else {
 195       s = create_itable_stub(vtable_index);
 196     }
 197 
 198     // Creation of vtable or itable can fail if there is not enough free space in the code cache.
 199     if (s == NULL) {
 200       return NULL;
 201     }
 202 
 203     enter(is_vtable_stub, vtable_index, s);
 204     if (PrintAdapterHandlers) {
 205       tty->print_cr("Decoding VtableStub %s[%d]@" INTX_FORMAT,
 206                     is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()));


 233   return s->entry_point();
 234 }
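Seen from the caller, find_stub() is the whole interface: code that has resolved a call to a vtable or itable dispatch asks for the stub entry by kind and slot index and must tolerate a NULL answer when the code cache is full. A small usage sketch (names are hypothetical):

  // Resolve a megamorphic virtual call to a shared vtable dispatch stub.
  address dispatch_entry = VtableStubs::find_stub(true /* vtable dispatch */, vtable_index);
  if (dispatch_entry == NULL) {
    // No space left in the code cache; the caller has to bail out or retry later.
  }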
 235 
 236 
 237 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
 238   MutexLocker ml(VtableStubs_lock);
 239   assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
 240   unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
 241   // enter s at the beginning of the corresponding list
 242   s->set_next(_table[h]);
 243   _table[h] = s;
 244   _number_of_vtable_stubs++;
 245 }
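enter() prepends the new stub to its hash chain; the matching lookup() used by find_stub() is not part of this diff. A plausible sketch, assuming it simply hashes the same key and walks the chain under the same lock:

VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
  MutexLocker ml(VtableStubs_lock);
  unsigned int hash = VtableStubs::hash(is_vtable_stub, vtable_index);
  // Walk the chain anchored at _table[hash] until a stub with the same
  // kind and index is found, or the chain ends.
  VtableStub* s = _table[hash];
  while (s != NULL && !s->matches(is_vtable_stub, vtable_index)) {
    s = s->next();
  }
  return s;
}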
 246 
 247 VtableStub* VtableStubs::entry_point(address pc) {
 248   MutexLocker ml(VtableStubs_lock);
 249   VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
 250   uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
 251   VtableStub* s;
 252   for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
 253   return (s == stub) ? s : NULL;
 254 }
 255 
 256 bool VtableStubs::contains(address pc) {
 257   // simple solution for now - we may want to use
 258   // a faster way if this function is called often
 259   return stub_containing(pc) != NULL;
 260 }
 261 
 262 
 263 VtableStub* VtableStubs::stub_containing(address pc) {
 264   // Note: No locking needed since any change to the data structure
 265   //       happens with an atomic store into it (we don't care about
 266   //       consistency with the _number_of_vtable_stubs counter).
 267   for (int i = 0; i < N; i++) {
 268     for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
 269       if (s->contains(pc)) return s;
 270     }
 271   }
 272   return NULL;
 273 }

