src/share/vm/code/vtableStubs.cpp

Print this page




// Current bump-allocation chunk that VtableStubs are carved out of
// (see VtableStub::operator new below); NULL until the first allocation.
address VtableStub::_chunk             = NULL;
// One-past-the-end of the current chunk; a new BufferBlob is allocated
// once _chunk + real_size would cross this boundary.
address VtableStub::_chunk_end         = NULL;
// Presumably the register/location holding the receiver at stub entry;
// starts out Bad() until set elsewhere — TODO(review): confirm setter.
VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();
  48 
  49 
// Custom allocator for VtableStubs: stubs are bump-allocated out of large
// shared BufferBlob chunks in the code cache, so each stub does not pay a
// full CodeBlob header. Returns NULL when the code cache is exhausted.
// code_size is the size of the stub's machine code that follows the header.
void* VtableStub::operator new(size_t size, int code_size) throw() {
  assert(size == sizeof(VtableStub), "mismatched size");
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    // Extra pd_code_alignment() bytes give align_chunk() headroom so the
    // aligned cursor cannot run past _chunk_end — TODO(review): confirm.
    const int bytes = chunk_factor * real_size + pd_code_alignment();
    BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      // Code cache allocation failed; caller must tolerate a NULL stub.
      return NULL;
    }
    _chunk = blob->content_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    // Notify JVMTI about this stub. The event will be recorded by the enclosing
    // JvmtiDynamicCodeEventCollector and posted when this thread has released
    // all locks.
    // NOTE(review): this posts a single event covering the whole chunk rather
    // than the individual stub being allocated — confirm that per-chunk
    // granularity is the intended JVMTI reporting here.
    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
    }
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;   // bump the cursor past this stub
  align_chunk();         // keep the cursor aligned for the next stub
  return res;
}
  79 
  80 
// Debug printout of this stub: its vtable index, receiver location, and
// the half-open code range [code_begin, code_end[.
void VtableStub::print_on(outputStream* st) const {
  // NOTE(review): receiver_location() is matched with %d and
  // code_begin()/code_end() are passed to INTPTR_FORMAT without a p2i()-style
  // cast — confirm the format specifiers match the argument types.
  st->print("vtable stub (index = %d, receiver_location = %d, code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
             index(), receiver_location(), code_begin(), code_end());
}
  85 
  86 
  87 // -----------------------------------------------------------------------------------------
  88 // Implementation of VtableStubs
  89 //
  90 // For each hash value there's a linked list of vtable stubs (with that


 113 
 114   VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
 115   if (s == NULL) {
 116     if (is_vtable_stub) {
 117       s = create_vtable_stub(vtable_index);
 118     } else {
 119       s = create_itable_stub(vtable_index);
 120     }
 121 
 122     // Creation of vtable or itable can fail if there is not enough free space in the code cache.
 123     if (s == NULL) {
 124       return NULL;
 125     }
 126 
 127     enter(is_vtable_stub, vtable_index, s);
 128     if (PrintAdapterHandlers) {
 129       tty->print_cr("Decoding VtableStub %s[%d]@%d",
 130                     is_vtable_stub? "vtbl": "itbl", vtable_index, VtableStub::receiver_location());
 131       Disassembler::decode(s->code_begin(), s->code_end());
 132     }






 133   }

 134   return s->entry_point();
 135 }
 136 
 137 
 138 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
 139   // Assumption: receiver_location < 4 in most cases.
 140   int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
 141   return (is_vtable_stub ? ~hash : hash)  & mask;
 142 }
 143 
 144 
 145 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
 146   MutexLocker ml(VtableStubs_lock);
 147   unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
 148   VtableStub* s = _table[hash];
 149   while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
 150   return s;
 151 }
 152 
 153 


 178   return stub_containing(pc) != NULL;
 179 }
 180 
 181 
 182 VtableStub* VtableStubs::stub_containing(address pc) {
 183   // Note: No locking needed since any change to the data structure
 184   //       happens with an atomic store into it (we don't care about
 185   //       consistency with the _number_of_vtable_stubs counter).
 186   for (int i = 0; i < N; i++) {
 187     for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
 188       if (s->contains(pc)) return s;
 189     }
 190   }
 191   return NULL;
 192 }
 193 
// VM-startup entry point; delegates one-time setup to VtableStubs.
void vtableStubs_init() {
  VtableStubs::initialize();
}
 197 








 198 
 199 //-----------------------------------------------------------------------------------------------------
 200 // Non-product code
 201 #ifndef PRODUCT
 202 
// Debug-only diagnostic invoked when compiled code dispatches through an
// illegal vtable index: prints the receiver's klass and aborts the VM with
// a fatal error describing the receiver, index, and actual vtable length.
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
  ResourceMark rm;   // reclaim resource-area allocations on exit
  HandleMark hm;     // scope handles created while printing
  Klass* klass = receiver->klass();
  InstanceKlass* ik = InstanceKlass::cast(klass);
  klassVtable* vt = ik->vtable();
  ik->print();       // dump the klass before dying, for post-mortem analysis
  fatal(err_msg("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
                "index %d (vtable length %d)",
                (address)receiver, index, vt->length()));
}
 214 
 215 #endif // PRODUCT


// Current bump-allocation chunk that VtableStubs are carved out of
// (see VtableStub::operator new below); NULL until the first allocation.
address VtableStub::_chunk             = NULL;
// One-past-the-end of the current chunk; a new BufferBlob is allocated
// once _chunk + real_size would cross this boundary.
address VtableStub::_chunk_end         = NULL;
// Presumably the register/location holding the receiver at stub entry;
// starts out Bad() until set elsewhere — TODO(review): confirm setter.
VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();
  48 
  49 
// Custom allocator for VtableStubs: stubs are bump-allocated out of large
// shared BufferBlob chunks in the code cache, so each stub does not pay a
// full CodeBlob header. Returns NULL when the code cache is exhausted.
// code_size is the size of the stub's machine code that follows the header.
// Note: in this revision JVMTI dynamic-code events are no longer posted
// here per chunk; the caller posts them per stub after creation.
void* VtableStub::operator new(size_t size, int code_size) throw() {
  assert(size == sizeof(VtableStub), "mismatched size");
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    // Extra pd_code_alignment() bytes give align_chunk() headroom so the
    // aligned cursor cannot run past _chunk_end — TODO(review): confirm.
    const int bytes = chunk_factor * real_size + pd_code_alignment();
    BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      // Code cache allocation failed; caller must tolerate a NULL stub.
      return NULL;
    }
    _chunk = blob->content_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;   // bump the cursor past this stub
  align_chunk();         // keep the cursor aligned for the next stub
  return res;
}
  73 
  74 
// Debug printout of this stub: its vtable index, receiver location, and
// the half-open code range [code_begin, code_end[.
void VtableStub::print_on(outputStream* st) const {
  // NOTE(review): receiver_location() is matched with %d and
  // code_begin()/code_end() are passed to INTPTR_FORMAT without a p2i()-style
  // cast — confirm the format specifiers match the argument types.
  st->print("vtable stub (index = %d, receiver_location = %d, code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
             index(), receiver_location(), code_begin(), code_end());
}
  79 
  80 
  81 // -----------------------------------------------------------------------------------------
  82 // Implementation of VtableStubs
  83 //
  84 // For each hash value there's a linked list of vtable stubs (with that


 107 
 108   VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
 109   if (s == NULL) {
 110     if (is_vtable_stub) {
 111       s = create_vtable_stub(vtable_index);
 112     } else {
 113       s = create_itable_stub(vtable_index);
 114     }
 115 
 116     // Creation of vtable or itable can fail if there is not enough free space in the code cache.
 117     if (s == NULL) {
 118       return NULL;
 119     }
 120 
 121     enter(is_vtable_stub, vtable_index, s);
 122     if (PrintAdapterHandlers) {
 123       tty->print_cr("Decoding VtableStub %s[%d]@%d",
 124                     is_vtable_stub? "vtbl": "itbl", vtable_index, VtableStub::receiver_location());
 125       Disassembler::decode(s->code_begin(), s->code_end());
 126     }
 127     // Notify JVMTI about this stub. The event will be recorded by the enclosing
 128     // JvmtiDynamicCodeEventCollector and posted when this thread has released
 129     // all locks.
 130     if (JvmtiExport::should_post_dynamic_code_generated()) {
 131       JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
 132                                                                    s->code_begin(), s->code_end());
 133     }
 134   }
 135   return s->entry_point();
 136 }
 137 
 138 
 139 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
 140   // Assumption: receiver_location < 4 in most cases.
 141   int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
 142   return (is_vtable_stub ? ~hash : hash)  & mask;
 143 }
 144 
 145 
 146 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
 147   MutexLocker ml(VtableStubs_lock);
 148   unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
 149   VtableStub* s = _table[hash];
 150   while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
 151   return s;
 152 }
 153 
 154 


 179   return stub_containing(pc) != NULL;
 180 }
 181 
 182 
 183 VtableStub* VtableStubs::stub_containing(address pc) {
 184   // Note: No locking needed since any change to the data structure
 185   //       happens with an atomic store into it (we don't care about
 186   //       consistency with the _number_of_vtable_stubs counter).
 187   for (int i = 0; i < N; i++) {
 188     for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
 189       if (s->contains(pc)) return s;
 190     }
 191   }
 192   return NULL;
 193 }
 194 
// VM-startup entry point; delegates one-time setup to VtableStubs.
void vtableStubs_init() {
  VtableStubs::initialize();
}
 198 
 199 void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
 200     for (int i = 0; i < N; i++) {
 201         for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
 202             f(s);
 203         }
 204     }
 205 }
 206 
 207 
 208 //-----------------------------------------------------------------------------------------------------
 209 // Non-product code
 210 #ifndef PRODUCT
 211 
// Debug-only diagnostic invoked when compiled code dispatches through an
// illegal vtable index: prints the receiver's klass and aborts the VM with
// a fatal error describing the receiver, index, and actual vtable length.
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
  ResourceMark rm;   // reclaim resource-area allocations on exit
  HandleMark hm;     // scope handles created while printing
  Klass* klass = receiver->klass();
  InstanceKlass* ik = InstanceKlass::cast(klass);
  klassVtable* vt = ik->vtable();
  ik->print();       // dump the klass before dying, for post-mortem analysis
  fatal(err_msg("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
                "index %d (vtable length %d)",
                (address)receiver, index, vt->length()));
}
 223 
 224 #endif // PRODUCT