106 // and use the reported "estimate" value.
107 // Here is a list of observed worst-case values:
108 // vtable itable
109 // aarch64: 460 324
110 // arm: ? ?
111 // ppc (linux, BE): 404 288
112 // ppc (linux, LE): 356 276
113 // ppc (AIX): 416 296
114 // s390x: 408 256
115 // Solaris-sparc: 792 348
116 // x86 (Linux): 670 309
117 // x86 (MacOS): 682 321
118 static const int first_vtableStub_size = 1024; // > largest observed vtable stub above (792, Solaris-sparc)
119 static const int first_itableStub_size = 512; // > largest observed itable stub above (348, Solaris-sparc)
120 #endif
121
122
123 void VtableStubs::initialize() {
124 VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
125 {
126 MutexLocker ml(VtableStubs_lock);
127 assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
128 assert(is_power_of_2(N), "N must be a power of 2");
129 for (int i = 0; i < N; i++) {
130 _table[i] = NULL;
131 }
132 }
133 }
134
135
136 int VtableStubs::code_size_limit(bool is_vtable_stub) {
137 if (is_vtable_stub) {
138 return _vtab_stub_size > 0 ? _vtab_stub_size : first_vtableStub_size;
139 } else { // itable stub
140 return _itab_stub_size > 0 ? _itab_stub_size : first_itableStub_size;
141 }
142 } // code_size_limit
143
144
145 void VtableStubs::check_and_set_size_limit(bool is_vtable_stub,
|
106 // and use the reported "estimate" value.
107 // Here is a list of observed worst-case values:
108 // vtable itable
109 // aarch64: 460 324
110 // arm: ? ?
111 // ppc (linux, BE): 404 288
112 // ppc (linux, LE): 356 276
113 // ppc (AIX): 416 296
114 // s390x: 408 256
115 // Solaris-sparc: 792 348
116 // x86 (Linux): 670 309
117 // x86 (MacOS): 682 321
118 static const int first_vtableStub_size = 1024; // > largest observed vtable stub above (792, Solaris-sparc)
119 static const int first_itableStub_size = 512; // > largest observed itable stub above (348, Solaris-sparc)
120 #endif
121
122
123 void VtableStubs::initialize() {
124 VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
125 {
126 MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
127 assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
128 assert(is_power_of_2(N), "N must be a power of 2");
129 for (int i = 0; i < N; i++) {
130 _table[i] = NULL;
131 }
132 }
133 }
134
135
136 int VtableStubs::code_size_limit(bool is_vtable_stub) {
137 if (is_vtable_stub) {
138 return _vtab_stub_size > 0 ? _vtab_stub_size : first_vtableStub_size;
139 } else { // itable stub
140 return _itab_stub_size > 0 ? _itab_stub_size : first_itableStub_size;
141 }
142 } // code_size_limit
143
144
145 void VtableStubs::check_and_set_size_limit(bool is_vtable_stub,
|
229 // Notify JVMTI about this stub. The event will be recorded by the enclosing
230 // JvmtiDynamicCodeEventCollector and posted when this thread has released
231 // all locks.
232 if (JvmtiExport::should_post_dynamic_code_generated()) {
233 JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
234 s->code_begin(), s->code_end());
235 }
236 }
237 return s->entry_point();
238 }
239
240
241 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
242 // Assumption: receiver_location < 4 in most cases.
243 int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
244 return (is_vtable_stub ? ~hash : hash) & mask;
245 }
246
247
248 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
249 MutexLocker ml(VtableStubs_lock);
250 unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
251 VtableStub* s = _table[hash];
252 while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
253 return s;
254 }
255
256
257 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
258 MutexLocker ml(VtableStubs_lock);
259 assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
260 unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
261 // enter s at the beginning of the corresponding list
262 s->set_next(_table[h]);
263 _table[h] = s;
264 _number_of_vtable_stubs++;
265 }
266
267 VtableStub* VtableStubs::entry_point(address pc) {
268 MutexLocker ml(VtableStubs_lock);
269 VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
270 uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
271 VtableStub* s;
272 for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
273 return (s == stub) ? s : NULL;
274 }
275
276 bool VtableStubs::contains(address pc) {
277 // simple solution for now - we may want to use
278 // a faster way if this function is called often
279 return stub_containing(pc) != NULL;
280 }
281
282
283 VtableStub* VtableStubs::stub_containing(address pc) {
284 // Note: No locking needed since any change to the data structure
285 // happens with an atomic store into it (we don't care about
286 // consistency with the _number_of_vtable_stubs counter).
287 for (int i = 0; i < N; i++) {
|
229 // Notify JVMTI about this stub. The event will be recorded by the enclosing
230 // JvmtiDynamicCodeEventCollector and posted when this thread has released
231 // all locks.
232 if (JvmtiExport::should_post_dynamic_code_generated()) {
233 JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
234 s->code_begin(), s->code_end());
235 }
236 }
237 return s->entry_point();
238 }
239
240
241 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
242 // Assumption: receiver_location < 4 in most cases.
243 int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
244 return (is_vtable_stub ? ~hash : hash) & mask;
245 }
246
247
248 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
249 MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
250 unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
251 VtableStub* s = _table[hash];
252 while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
253 return s;
254 }
255
256
257 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
258 MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
259 assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
260 unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
261 // enter s at the beginning of the corresponding list
262 s->set_next(_table[h]);
263 _table[h] = s;
264 _number_of_vtable_stubs++;
265 }
266
267 VtableStub* VtableStubs::entry_point(address pc) {
268 MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
269 VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
270 uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
271 VtableStub* s;
272 for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
273 return (s == stub) ? s : NULL;
274 }
275
276 bool VtableStubs::contains(address pc) {
277 // simple solution for now - we may want to use
278 // a faster way if this function is called often
279 return stub_containing(pc) != NULL;
280 }
281
282
283 VtableStub* VtableStubs::stub_containing(address pc) {
284 // Note: No locking needed since any change to the data structure
285 // happens with an atomic store into it (we don't care about
286 // consistency with the _number_of_vtable_stubs counter).
287 for (int i = 0; i < N; i++) {
|