/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER2
#include "opto/matcher.hpp"
#endif

// -----------------------------------------------------------------------------------------
// Implementation of VtableStub

// Bump-pointer allocation state shared by all stubs: _chunk is the next free
// byte in the current BufferBlob chunk, _chunk_end is one past its end.
address VtableStub::_chunk             = NULL;
address VtableStub::_chunk_end         = NULL;
// Register holding the receiver at dispatch time; filled in by
// VtableStubs::initialize() from SharedRuntime::name_for_receiver().
VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();


// Allocate storage for one VtableStub header plus code_size bytes of stub
// code, carved out of the current chunk by bumping _chunk. When the chunk is
// exhausted (or on first use) a fresh BufferBlob is requested from the code
// cache. Returns NULL if that BufferBlob allocation fails.
void* VtableStub::operator new(size_t size, int code_size) throw() {
  assert(size == sizeof(VtableStub), "mismatched size");
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    // Extra pd_code_alignment() bytes so the align_chunk() calls below can
    // never push an allocation past _chunk_end.
    const int bytes = chunk_factor * real_size + pd_code_alignment();
    BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      return NULL;
    }
    _chunk = blob->content_begin();
    _chunk_end = _chunk + bytes;
    // Register the whole chunk so profilers can attribute pcs in this range.
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}


// Debug/diagnostic output: prints this stub's index, receiver location and
// half-open code range [code_begin, code_end[.
void VtableStub::print_on(outputStream* st) const {
  st->print("vtable stub (index = %d, receiver_location = %d, code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
            index(), receiver_location(), code_begin(), code_end());
}
----------------------------------------------------------------------------------------- 82 // Implementation of VtableStubs 83 // 84 // For each hash value there's a linked list of vtable stubs (with that 85 // hash value). Each list is anchored in a little hash _table, indexed 86 // by that hash value. 87 88 VtableStub* VtableStubs::_table[VtableStubs::N]; 89 int VtableStubs::_number_of_vtable_stubs = 0; 90 91 92 void VtableStubs::initialize() { 93 VtableStub::_receiver_location = SharedRuntime::name_for_receiver(); 94 { 95 MutexLocker ml(VtableStubs_lock); 96 assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once"); 97 assert(is_power_of_2(N), "N must be a power of 2"); 98 for (int i = 0; i < N; i++) { 99 _table[i] = NULL; 100 } 101 } 102 } 103 104 105 address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) { 106 assert(vtable_index >= 0, "must be positive"); 107 108 VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL; 109 if (s == NULL) { 110 if (is_vtable_stub) { 111 s = create_vtable_stub(vtable_index); 112 } else { 113 s = create_itable_stub(vtable_index); 114 } 115 116 // Creation of vtable or itable can fail if there is not enough free space in the code cache. 117 if (s == NULL) { 118 return NULL; 119 } 120 121 enter(is_vtable_stub, vtable_index, s); 122 if (PrintAdapterHandlers) { 123 tty->print_cr("Decoding VtableStub %s[%d]@%d", 124 is_vtable_stub? "vtbl": "itbl", vtable_index, VtableStub::receiver_location()); 125 Disassembler::decode(s->code_begin(), s->code_end()); 126 } 127 // Notify JVMTI about this stub. The event will be recorded by the enclosing 128 // JvmtiDynamicCodeEventCollector and posted when this thread has released 129 // all locks. 130 if (JvmtiExport::should_post_dynamic_code_generated()) { 131 JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? 
"vtable stub": "itable stub", 132 s->code_begin(), s->code_end()); 133 } 134 } 135 return s->entry_point(); 136 } 137 138 139 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){ 140 // Assumption: receiver_location < 4 in most cases. 141 int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index; 142 return (is_vtable_stub ? ~hash : hash) & mask; 143 } 144 145 146 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) { 147 MutexLocker ml(VtableStubs_lock); 148 unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index); 149 VtableStub* s = _table[hash]; 150 while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next(); 151 return s; 152 } 153 154 155 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) { 156 MutexLocker ml(VtableStubs_lock); 157 assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub"); 158 unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index); 159 // enter s at the beginning of the corresponding list 160 s->set_next(_table[h]); 161 _table[h] = s; 162 _number_of_vtable_stubs++; 163 } 164 165 166 bool VtableStubs::is_entry_point(address pc) { 167 MutexLocker ml(VtableStubs_lock); 168 VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset()); 169 uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index()); 170 VtableStub* s; 171 for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {} 172 return s == stub; 173 } 174 175 176 bool VtableStubs::contains(address pc) { 177 // simple solution for now - we may want to use 178 // a faster way if this function is called often 179 return stub_containing(pc) != NULL; 180 } 181 182 183 VtableStub* VtableStubs::stub_containing(address pc) { 184 // Note: No locking needed since any change to the data structure 185 // happens with an atomic store into it (we don't care about 186 // consistency with the _number_of_vtable_stubs counter). 
187 for (int i = 0; i < N; i++) { 188 for (VtableStub* s = _table[i]; s != NULL; s = s->next()) { 189 if (s->contains(pc)) return s; 190 } 191 } 192 return NULL; 193 } 194 195 void vtableStubs_init() { 196 VtableStubs::initialize(); 197 } 198 199 void VtableStubs::vtable_stub_do(void f(VtableStub*)) { 200 for (int i = 0; i < N; i++) { 201 for (VtableStub* s = _table[i]; s != NULL; s = s->next()) { 202 f(s); 203 } 204 } 205 } 206 207 208 //----------------------------------------------------------------------------------------------------- 209 // Non-product code 210 #ifndef PRODUCT 211 212 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) { 213 ResourceMark rm; 214 HandleMark hm; 215 Klass* klass = receiver->klass(); 216 InstanceKlass* ik = InstanceKlass::cast(klass); 217 klassVtable* vt = ik->vtable(); 218 ik->print(); 219 fatal(err_msg("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", " 220 "index %d (vtable length %d)", 221 (address)receiver, index, vt->length())); 222 } 223 224 #endif // Product