rev 47400 : [mq]: cmpxchg_ptr
1 /*
2 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/icBuffer.hpp"
31 #include "code/nmethod.hpp"
32 #include "code/pcDesc.hpp"
33 #include "code/scopeDesc.hpp"
34 #include "code/vtableStubs.hpp"
35 #include "compiler/compileBroker.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
38 #include "gc/g1/heapRegion.hpp"
39 #include "gc/shared/barrierSet.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "gc/shared/gcLocker.inline.hpp"
42 #include "interpreter/bytecode.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "interpreter/linkResolver.hpp"
45 #include "logging/log.hpp"
46 #include "logging/logStream.hpp"
47 #include "memory/oopFactory.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "oops/objArrayKlass.hpp"
50 #include "oops/oop.inline.hpp"
51 #include "oops/typeArrayOop.inline.hpp"
52 #include "opto/ad.hpp"
53 #include "opto/addnode.hpp"
54 #include "opto/callnode.hpp"
55 #include "opto/cfgnode.hpp"
56 #include "opto/graphKit.hpp"
57 #include "opto/machnode.hpp"
58 #include "opto/matcher.hpp"
59 #include "opto/memnode.hpp"
60 #include "opto/mulnode.hpp"
61 #include "opto/runtime.hpp"
62 #include "opto/subnode.hpp"
63 #include "runtime/atomic.hpp"
64 #include "runtime/handles.inline.hpp"
65 #include "runtime/interfaceSupport.hpp"
66 #include "runtime/javaCalls.hpp"
67 #include "runtime/sharedRuntime.hpp"
68 #include "runtime/signature.hpp"
69 #include "runtime/threadCritical.hpp"
70 #include "runtime/vframe.hpp"
71 #include "runtime/vframeArray.hpp"
72 #include "runtime/vframe_hp.hpp"
73 #include "utilities/copy.hpp"
74 #include "utilities/preserveException.hpp"
75
76
77 // For debugging purposes:
78 // To force FullGCALot inside a runtime function, add the following two lines
79 //
80 // Universe::release_fullgc_alot_dummy();
81 // MarkSweep::invoke(0, "Debugging");
82 //
83 // On the command line, specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000
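//
// A minimal illustrative sketch (not compiled in; "some_entry_C" is a hypothetical
// entry name) of where those two lines would be dropped:
//
//   JRT_ENTRY(void, OptoRuntime::some_entry_C(JavaThread* thread))
//     Universe::release_fullgc_alot_dummy();
//     MarkSweep::invoke(0, "Debugging");
//     // ... rest of the runtime entry ...
//   JRT_END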
84
85
86
87
88 // Compiled code entry points
89 address OptoRuntime::_new_instance_Java = NULL;
90 address OptoRuntime::_new_array_Java = NULL;
91 address OptoRuntime::_new_array_nozero_Java = NULL;
92 address OptoRuntime::_multianewarray2_Java = NULL;
93 address OptoRuntime::_multianewarray3_Java = NULL;
94 address OptoRuntime::_multianewarray4_Java = NULL;
95 address OptoRuntime::_multianewarray5_Java = NULL;
96 address OptoRuntime::_multianewarrayN_Java = NULL;
97 address OptoRuntime::_g1_wb_pre_Java = NULL;
98 address OptoRuntime::_g1_wb_post_Java = NULL;
99 address OptoRuntime::_vtable_must_compile_Java = NULL;
100 address OptoRuntime::_complete_monitor_locking_Java = NULL;
101 address OptoRuntime::_monitor_notify_Java = NULL;
102 address OptoRuntime::_monitor_notifyAll_Java = NULL;
103 address OptoRuntime::_rethrow_Java = NULL;
104
105 address OptoRuntime::_slow_arraycopy_Java = NULL;
106 address OptoRuntime::_register_finalizer_Java = NULL;
107
108 ExceptionBlob* OptoRuntime::_exception_blob;
109
110 // This should be called in an assertion at the start of OptoRuntime routines
111 // which are entered from compiled code (all of them)
112 #ifdef ASSERT
113 static bool check_compiled_frame(JavaThread* thread) {
114 assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
115 RegisterMap map(thread, false);
116 frame caller = thread->last_frame().sender(&map);
117   assert(caller.is_compiled_frame(), "caller is not a compiled frame");
118 return true;
119 }
120 #endif // ASSERT
121
122
123 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \
124 var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc); \
125 if (var == NULL) { return false; }
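// For reference, the first gen() line in generate() below expands to (roughly):
//
//   _new_instance_Java = generate_stub(env, new_instance_Type,
//                                      CAST_FROM_FN_PTR(address, new_instance_C),
//                                      "_new_instance_Java", 0, true, false, false);
//   if (_new_instance_Java == NULL) { return false; }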
126
127 bool OptoRuntime::generate(ciEnv* env) {
128
129 generate_exception_blob();
130
131 // Note: "tls" means fetching the return oop out of thread-local storage.
132 //
133 // variable/name type-function-gen , runtime method ,fncy_jp, tls,save_args,retpc
134 // -------------------------------------------------------------------------------------------------------------------------------
135 gen(env, _new_instance_Java , new_instance_Type , new_instance_C , 0 , true , false, false);
136 gen(env, _new_array_Java , new_array_Type , new_array_C , 0 , true , false, false);
137 gen(env, _new_array_nozero_Java , new_array_Type , new_array_nozero_C , 0 , true , false, false);
138 gen(env, _multianewarray2_Java , multianewarray2_Type , multianewarray2_C , 0 , true , false, false);
139 gen(env, _multianewarray3_Java , multianewarray3_Type , multianewarray3_C , 0 , true , false, false);
140 gen(env, _multianewarray4_Java , multianewarray4_Type , multianewarray4_C , 0 , true , false, false);
141 gen(env, _multianewarray5_Java , multianewarray5_Type , multianewarray5_C , 0 , true , false, false);
142 gen(env, _multianewarrayN_Java , multianewarrayN_Type , multianewarrayN_C , 0 , true , false, false);
143 gen(env, _g1_wb_pre_Java , g1_wb_pre_Type , SharedRuntime::g1_wb_pre , 0 , false, false, false);
144 gen(env, _g1_wb_post_Java , g1_wb_post_Type , SharedRuntime::g1_wb_post , 0 , false, false, false);
145 gen(env, _complete_monitor_locking_Java , complete_monitor_enter_Type , SharedRuntime::complete_monitor_locking_C, 0, false, false, false);
146 gen(env, _monitor_notify_Java , monitor_notify_Type , monitor_notify_C , 0 , false, false, false);
147 gen(env, _monitor_notifyAll_Java , monitor_notify_Type , monitor_notifyAll_C , 0 , false, false, false);
148 gen(env, _rethrow_Java , rethrow_Type , rethrow_C , 2 , true , false, true );
149
150 gen(env, _slow_arraycopy_Java , slow_arraycopy_Type , SharedRuntime::slow_arraycopy_C , 0 , false, false, false);
151 gen(env, _register_finalizer_Java , register_finalizer_Type , register_finalizer , 0 , false, false, false);
152
153 return true;
154 }
155
156 #undef gen
157
158
159 // Helper method to do generation of RunTimeStub's
160 address OptoRuntime::generate_stub( ciEnv* env,
161 TypeFunc_generator gen, address C_function,
162 const char *name, int is_fancy_jump,
163 bool pass_tls,
164 bool save_argument_registers,
165 bool return_pc) {
166
167   // Use the default directive here, since there is no method to match against.
168 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization));
169 ResourceMark rm;
170 Compile C( env, gen, C_function, name, is_fancy_jump, pass_tls, save_argument_registers, return_pc, directive);
171 DirectivesStack::release(directive);
172 return C.stub_entry_point();
173 }
174
175 const char* OptoRuntime::stub_name(address entry) {
176 #ifndef PRODUCT
177 CodeBlob* cb = CodeCache::find_blob(entry);
178   RuntimeStub* rs = (RuntimeStub*)cb;
179 assert(rs != NULL && rs->is_runtime_stub(), "not a runtime stub");
180 return rs->name();
181 #else
182 // Fast implementation for product mode (maybe it should be inlined too)
183 return "runtime stub";
184 #endif
185 }
186
187
188 //=============================================================================
189 // Opto compiler runtime routines
190 //=============================================================================
191
192
193 //=============================allocation======================================
194 // We failed the fast-path allocation. Now we need to do a scavenge or GC
195 // and try allocation again.
196
197 void OptoRuntime::new_store_pre_barrier(JavaThread* thread) {
198 // After any safepoint, just before going back to compiled code,
199 // we inform the GC that we will be doing initializing writes to
200 // this object in the future without emitting card-marks, so
201 // GC may take any compensating steps.
202 // NOTE: Keep this code consistent with GraphKit::store_barrier.
203
204 oop new_obj = thread->vm_result();
205 if (new_obj == NULL) return;
206
207 assert(Universe::heap()->can_elide_tlab_store_barriers(),
208 "compiler must check this first");
209 // GC may decide to give back a safer copy of new_obj.
210 new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj);
211 thread->set_vm_result(new_obj);
212 }
213
214 // object allocation
215 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* thread))
216 JRT_BLOCK;
217 #ifndef PRODUCT
218 SharedRuntime::_new_instance_ctr++; // new instance requires GC
219 #endif
220 assert(check_compiled_frame(thread), "incorrect caller");
221
222 // These checks are cheap to make and support reflective allocation.
223 int lh = klass->layout_helper();
224 if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
225 Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
226 klass->check_valid_for_instantiation(false, THREAD);
227 if (!HAS_PENDING_EXCEPTION) {
228 InstanceKlass::cast(klass)->initialize(THREAD);
229 }
230 }
231
232 if (!HAS_PENDING_EXCEPTION) {
233 // Scavenge and allocate an instance.
234 Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
235 oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
236 thread->set_vm_result(result);
237
238 // Pass oops back through thread local storage. Our apparent type to Java
239 // is that we return an oop, but we can block on exit from this routine and
240 // a GC can trash the oop in C's return register. The generated stub will
241 // fetch the oop from TLS after any possible GC.
242 }
243
244 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
245 JRT_BLOCK_END;
246
247 if (GraphKit::use_ReduceInitialCardMarks()) {
248 // inform GC that we won't do card marks for initializing writes.
249 new_store_pre_barrier(thread);
250 }
251 JRT_END
252
253
254 // array allocation
255 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread *thread))
256 JRT_BLOCK;
257 #ifndef PRODUCT
258 SharedRuntime::_new_array_ctr++; // new array requires GC
259 #endif
260 assert(check_compiled_frame(thread), "incorrect caller");
261
262 // Scavenge and allocate an instance.
263 oop result;
264
265 if (array_type->is_typeArray_klass()) {
266 // The oopFactory likes to work with the element type.
267 // (We could bypass the oopFactory, since it doesn't add much value.)
268 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
269 result = oopFactory::new_typeArray(elem_type, len, THREAD);
270 } else {
271 // Although the oopFactory likes to work with the elem_type,
272 // the compiler prefers the array_type, since it must already have
273 // that latter value in hand for the fast path.
274 Handle holder(THREAD, array_type->klass_holder()); // keep the array klass alive
275 Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
276 result = oopFactory::new_objArray(elem_type, len, THREAD);
277 }
278
279 // Pass oops back through thread local storage. Our apparent type to Java
280 // is that we return an oop, but we can block on exit from this routine and
281 // a GC can trash the oop in C's return register. The generated stub will
282 // fetch the oop from TLS after any possible GC.
283 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
284 thread->set_vm_result(result);
285 JRT_BLOCK_END;
286
287 if (GraphKit::use_ReduceInitialCardMarks()) {
288 // inform GC that we won't do card marks for initializing writes.
289 new_store_pre_barrier(thread);
290 }
291 JRT_END
292
293 // array allocation without zeroing
294 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread *thread))
295 JRT_BLOCK;
296 #ifndef PRODUCT
297 SharedRuntime::_new_array_ctr++; // new array requires GC
298 #endif
299 assert(check_compiled_frame(thread), "incorrect caller");
300
301 // Scavenge and allocate an instance.
302 oop result;
303
304 assert(array_type->is_typeArray_klass(), "should be called only for type array");
305 // The oopFactory likes to work with the element type.
306 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
307 result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);
308
309 // Pass oops back through thread local storage. Our apparent type to Java
310 // is that we return an oop, but we can block on exit from this routine and
311 // a GC can trash the oop in C's return register. The generated stub will
312 // fetch the oop from TLS after any possible GC.
313 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
314 thread->set_vm_result(result);
315 JRT_BLOCK_END;
316
317 if (GraphKit::use_ReduceInitialCardMarks()) {
318 // inform GC that we won't do card marks for initializing writes.
319 new_store_pre_barrier(thread);
320 }
321
322 oop result = thread->vm_result();
323 if ((len > 0) && (result != NULL) &&
324 is_deoptimized_caller_frame(thread)) {
325 // Zero array here if the caller is deoptimized.
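    // (The compiled caller, typically under -XX:+ReduceBulkZeroing, elided the zeroing
    // because it proved it would initialize every element itself; a deoptimized caller
    // resumes in the interpreter, which makes no such promise, hence the zeroing here.)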
326 int size = ((typeArrayOop)result)->object_size();
327 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
328 const size_t hs = arrayOopDesc::header_size(elem_type);
329     // Align to next 8 bytes to avoid trashing the array's length.
330 const size_t aligned_hs = align_object_offset(hs);
331 HeapWord* obj = (HeapWord*)result;
332 if (aligned_hs > hs) {
333 Copy::zero_to_words(obj+hs, aligned_hs-hs);
334 }
335 // Optimized zeroing.
336 Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
337 }
338
339 JRT_END
340
341 // Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
342
343 // multianewarray for 2 dimensions
344 JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread *thread))
345 #ifndef PRODUCT
346   SharedRuntime::_multi2_ctr++;                // multianewarray for 2 dimensions
347 #endif
348 assert(check_compiled_frame(thread), "incorrect caller");
349 assert(elem_type->is_klass(), "not a class");
350 jint dims[2];
351 dims[0] = len1;
352 dims[1] = len2;
353 Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
354 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
355 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
356 thread->set_vm_result(obj);
357 JRT_END
358
359 // multianewarray for 3 dimensions
360 JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread *thread))
361 #ifndef PRODUCT
362   SharedRuntime::_multi3_ctr++;                // multianewarray for 3 dimensions
363 #endif
364 assert(check_compiled_frame(thread), "incorrect caller");
365 assert(elem_type->is_klass(), "not a class");
366 jint dims[3];
367 dims[0] = len1;
368 dims[1] = len2;
369 dims[2] = len3;
370 Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
371 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
372 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
373 thread->set_vm_result(obj);
374 JRT_END
375
376 // multianewarray for 4 dimensions
377 JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread *thread))
378 #ifndef PRODUCT
379   SharedRuntime::_multi4_ctr++;                // multianewarray for 4 dimensions
380 #endif
381 assert(check_compiled_frame(thread), "incorrect caller");
382 assert(elem_type->is_klass(), "not a class");
383 jint dims[4];
384 dims[0] = len1;
385 dims[1] = len2;
386 dims[2] = len3;
387 dims[3] = len4;
388 Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
389 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
390 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
391 thread->set_vm_result(obj);
392 JRT_END
393
394 // multianewarray for 5 dimensions
395 JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread *thread))
396 #ifndef PRODUCT
397   SharedRuntime::_multi5_ctr++;                // multianewarray for 5 dimensions
398 #endif
399 assert(check_compiled_frame(thread), "incorrect caller");
400 assert(elem_type->is_klass(), "not a class");
401 jint dims[5];
402 dims[0] = len1;
403 dims[1] = len2;
404 dims[2] = len3;
405 dims[3] = len4;
406 dims[4] = len5;
407 Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
408 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
409 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
410 thread->set_vm_result(obj);
411 JRT_END
412
413 JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread *thread))
414 assert(check_compiled_frame(thread), "incorrect caller");
415 assert(elem_type->is_klass(), "not a class");
416 assert(oop(dims)->is_typeArray(), "not an array");
417
418 ResourceMark rm;
419 jint len = dims->length();
420 assert(len > 0, "Dimensions array should contain data");
421 jint *j_dims = typeArrayOop(dims)->int_at_addr(0);
422 jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
423 Copy::conjoint_jints_atomic(j_dims, c_dims, len);
424
425 Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
426 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
427 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
428 thread->set_vm_result(obj);
429 JRT_END
430
431 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread *thread))
432
433 // Very few notify/notifyAll operations find any threads on the waitset, so
434 // the dominant fast-path is to simply return.
435 // Relatedly, it's critical that notify/notifyAll be fast in order to
436 // reduce lock hold times.
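  // While a safepoint is being synchronized we skip the quick path; the slow path
  // below performs a proper VM transition and will block at the safepoint.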
437 if (!SafepointSynchronize::is_synchronizing()) {
438 if (ObjectSynchronizer::quick_notify(obj, thread, false)) {
439 return;
440 }
441 }
442
443 // This is the case the fast-path above isn't provisioned to handle.
444 // The fast-path is designed to handle frequently arising cases in an efficient manner.
445 // (The fast-path is just a degenerate variant of the slow-path).
446 // Perform the dreaded state transition and pass control into the slow-path.
447 JRT_BLOCK;
448 Handle h_obj(THREAD, obj);
449 ObjectSynchronizer::notify(h_obj, CHECK);
450 JRT_BLOCK_END;
451 JRT_END
452
453 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread *thread))
454
455   if (!SafepointSynchronize::is_synchronizing()) {
456 if (ObjectSynchronizer::quick_notify(obj, thread, true)) {
457 return;
458 }
459 }
460
461 // This is the case the fast-path above isn't provisioned to handle.
462 // The fast-path is designed to handle frequently arising cases in an efficient manner.
463 // (The fast-path is just a degenerate variant of the slow-path).
464 // Perform the dreaded state transition and pass control into the slow-path.
465 JRT_BLOCK;
466 Handle h_obj(THREAD, obj);
467 ObjectSynchronizer::notifyall(h_obj, CHECK);
468 JRT_BLOCK_END;
469 JRT_END
470
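// Conventions used by the *_Type() signature helpers below: TypeTuple::fields(n)
// allocates slots for n declared arguments after the standard
// control/io/memory/frameptr/retadr inputs occupying [0, TypeFunc::Parms); two-slot
// values (long/double) take a value slot followed by Type::HALF; a range built from
// TypeFunc::Parms+0 fields means the C function returns void.  For the blocking
// allocation entries the returned oop is also re-fetched from thread-local storage
// by the generated stub (the "tls" column in generate() above), since a GC at the
// VM transition may have moved it.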
471 const TypeFunc *OptoRuntime::new_instance_Type() {
472 // create input type (domain)
473 const Type **fields = TypeTuple::fields(1);
474 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
475 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
476
477 // create result type (range)
478 fields = TypeTuple::fields(1);
479 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
480
481 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
482
483 return TypeFunc::make(domain, range);
484 }
485
486
487 const TypeFunc *OptoRuntime::athrow_Type() {
488 // create input type (domain)
489 const Type **fields = TypeTuple::fields(1);
490   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception object to throw
491 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
492
493 // create result type (range)
494 fields = TypeTuple::fields(0);
495
496 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
497
498 return TypeFunc::make(domain, range);
499 }
500
501
502 const TypeFunc *OptoRuntime::new_array_Type() {
503 // create input type (domain)
504 const Type **fields = TypeTuple::fields(2);
505 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
506 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size
507 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
508
509 // create result type (range)
510 fields = TypeTuple::fields(1);
511 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
512
513 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
514
515 return TypeFunc::make(domain, range);
516 }
517
518 const TypeFunc *OptoRuntime::multianewarray_Type(int ndim) {
519 // create input type (domain)
520 const int nargs = ndim + 1;
521 const Type **fields = TypeTuple::fields(nargs);
522 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
523 for( int i = 1; i < nargs; i++ )
524 fields[TypeFunc::Parms + i] = TypeInt::INT; // array size
525 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);
526
527 // create result type (range)
528 fields = TypeTuple::fields(1);
529 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
530 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
531
532 return TypeFunc::make(domain, range);
533 }
534
535 const TypeFunc *OptoRuntime::multianewarray2_Type() {
536 return multianewarray_Type(2);
537 }
538
539 const TypeFunc *OptoRuntime::multianewarray3_Type() {
540 return multianewarray_Type(3);
541 }
542
543 const TypeFunc *OptoRuntime::multianewarray4_Type() {
544 return multianewarray_Type(4);
545 }
546
547 const TypeFunc *OptoRuntime::multianewarray5_Type() {
548 return multianewarray_Type(5);
549 }
550
551 const TypeFunc *OptoRuntime::multianewarrayN_Type() {
552 // create input type (domain)
553 const Type **fields = TypeTuple::fields(2);
554 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
555 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // array of dim sizes
556 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
557
558 // create result type (range)
559 fields = TypeTuple::fields(1);
560 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
561 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
562
563 return TypeFunc::make(domain, range);
564 }
565
566 const TypeFunc *OptoRuntime::g1_wb_pre_Type() {
567 const Type **fields = TypeTuple::fields(2);
568 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
569 fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
570 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
571
572 // create result type (range)
573 fields = TypeTuple::fields(0);
574 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
575
576 return TypeFunc::make(domain, range);
577 }
578
579 const TypeFunc *OptoRuntime::g1_wb_post_Type() {
580
581 const Type **fields = TypeTuple::fields(2);
582 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Card addr
583 fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
584 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
585
586 // create result type (range)
587 fields = TypeTuple::fields(0);
588 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
589
590 return TypeFunc::make(domain, range);
591 }
592
593 const TypeFunc *OptoRuntime::uncommon_trap_Type() {
594 // create input type (domain)
595 const Type **fields = TypeTuple::fields(1);
596 fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
597 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
598
599 // create result type (range)
600 fields = TypeTuple::fields(0);
601 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
602
603 return TypeFunc::make(domain, range);
604 }
605
606 //-----------------------------------------------------------------------------
607 // Monitor Handling
608 const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
609 // create input type (domain)
610 const Type **fields = TypeTuple::fields(2);
611 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
612 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
613 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
614
615 // create result type (range)
616 fields = TypeTuple::fields(0);
617
618 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
619
620 return TypeFunc::make(domain,range);
621 }
622
623
624 //-----------------------------------------------------------------------------
625 const TypeFunc *OptoRuntime::complete_monitor_exit_Type() {
626 // create input type (domain)
627 const Type **fields = TypeTuple::fields(3);
628 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
629 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock
630 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self)
631 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
632
633 // create result type (range)
634 fields = TypeTuple::fields(0);
635
636 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
637
638 return TypeFunc::make(domain, range);
639 }
640
641 const TypeFunc *OptoRuntime::monitor_notify_Type() {
642 // create input type (domain)
643 const Type **fields = TypeTuple::fields(1);
644 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
645 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
646
647 // create result type (range)
648 fields = TypeTuple::fields(0);
649 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
650 return TypeFunc::make(domain, range);
651 }
652
653 const TypeFunc* OptoRuntime::flush_windows_Type() {
654 // create input type (domain)
655 const Type** fields = TypeTuple::fields(1);
656 fields[TypeFunc::Parms+0] = NULL; // void
657 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
658
659 // create result type
660 fields = TypeTuple::fields(1);
661 fields[TypeFunc::Parms+0] = NULL; // void
662 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
663
664 return TypeFunc::make(domain, range);
665 }
666
667 const TypeFunc* OptoRuntime::l2f_Type() {
668 // create input type (domain)
669 const Type **fields = TypeTuple::fields(2);
670 fields[TypeFunc::Parms+0] = TypeLong::LONG;
671 fields[TypeFunc::Parms+1] = Type::HALF;
672 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
673
674 // create result type (range)
675 fields = TypeTuple::fields(1);
676 fields[TypeFunc::Parms+0] = Type::FLOAT;
677 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
678
679 return TypeFunc::make(domain, range);
680 }
681
682 const TypeFunc* OptoRuntime::modf_Type() {
683 const Type **fields = TypeTuple::fields(2);
684 fields[TypeFunc::Parms+0] = Type::FLOAT;
685 fields[TypeFunc::Parms+1] = Type::FLOAT;
686 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
687
688 // create result type (range)
689 fields = TypeTuple::fields(1);
690 fields[TypeFunc::Parms+0] = Type::FLOAT;
691
692 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
693
694 return TypeFunc::make(domain, range);
695 }
696
697 const TypeFunc *OptoRuntime::Math_D_D_Type() {
698 // create input type (domain)
699 const Type **fields = TypeTuple::fields(2);
700   // double argument, passed as two slots (DOUBLE + HALF)
701 fields[TypeFunc::Parms+0] = Type::DOUBLE;
702 fields[TypeFunc::Parms+1] = Type::HALF;
703 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
704
705 // create result type (range)
706 fields = TypeTuple::fields(2);
707 fields[TypeFunc::Parms+0] = Type::DOUBLE;
708 fields[TypeFunc::Parms+1] = Type::HALF;
709 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
710
711 return TypeFunc::make(domain, range);
712 }
713
714 const TypeFunc* OptoRuntime::Math_DD_D_Type() {
715 const Type **fields = TypeTuple::fields(4);
716 fields[TypeFunc::Parms+0] = Type::DOUBLE;
717 fields[TypeFunc::Parms+1] = Type::HALF;
718 fields[TypeFunc::Parms+2] = Type::DOUBLE;
719 fields[TypeFunc::Parms+3] = Type::HALF;
720 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);
721
722 // create result type (range)
723 fields = TypeTuple::fields(2);
724 fields[TypeFunc::Parms+0] = Type::DOUBLE;
725 fields[TypeFunc::Parms+1] = Type::HALF;
726 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
727
728 return TypeFunc::make(domain, range);
729 }
730
731 //-------------- currentTimeMillis, currentTimeNanos, etc
732
733 const TypeFunc* OptoRuntime::void_long_Type() {
734 // create input type (domain)
735 const Type **fields = TypeTuple::fields(0);
736 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
737
738 // create result type (range)
739 fields = TypeTuple::fields(2);
740 fields[TypeFunc::Parms+0] = TypeLong::LONG;
741 fields[TypeFunc::Parms+1] = Type::HALF;
742 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
743
744 return TypeFunc::make(domain, range);
745 }
746
747 // arraycopy stub variations:
748 enum ArrayCopyType {
749 ac_fast, // void(ptr, ptr, size_t)
750 ac_checkcast, // int(ptr, ptr, size_t, size_t, ptr)
751 ac_slow, // void(ptr, int, ptr, int, int)
752 ac_generic // int(ptr, int, ptr, int, int)
753 };
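// Note: in make_arraycopy_Type() below each size_t argument occupies two adjacent
// slots on LP64 (TypeX_X followed by Type::HALF), which is why argcnt is bumped by
// num_size_args there.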
754
755 static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
756 // create input type (domain)
757 int num_args = (act == ac_fast ? 3 : 5);
758 int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
759 int argcnt = num_args;
760 LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
761 const Type** fields = TypeTuple::fields(argcnt);
762 int argp = TypeFunc::Parms;
763 fields[argp++] = TypePtr::NOTNULL; // src
764 if (num_size_args == 0) {
765 fields[argp++] = TypeInt::INT; // src_pos
766 }
767 fields[argp++] = TypePtr::NOTNULL; // dest
768 if (num_size_args == 0) {
769 fields[argp++] = TypeInt::INT; // dest_pos
770 fields[argp++] = TypeInt::INT; // length
771 }
772 while (num_size_args-- > 0) {
773 fields[argp++] = TypeX_X; // size in whatevers (size_t)
774 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
775 }
776 if (act == ac_checkcast) {
777 fields[argp++] = TypePtr::NOTNULL; // super_klass
778 }
779 assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
780 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
781
782 // create result type if needed
783 int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
784 fields = TypeTuple::fields(1);
785 if (retcnt == 0)
786 fields[TypeFunc::Parms+0] = NULL; // void
787 else
788 fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
789 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
790 return TypeFunc::make(domain, range);
791 }
792
793 const TypeFunc* OptoRuntime::fast_arraycopy_Type() {
794 // This signature is simple: Two base pointers and a size_t.
795 return make_arraycopy_Type(ac_fast);
796 }
797
798 const TypeFunc* OptoRuntime::checkcast_arraycopy_Type() {
799 // An extension of fast_arraycopy_Type which adds type checking.
800 return make_arraycopy_Type(ac_checkcast);
801 }
802
803 const TypeFunc* OptoRuntime::slow_arraycopy_Type() {
804 // This signature is exactly the same as System.arraycopy.
805 // There are no intptr_t (int/long) arguments.
806 return make_arraycopy_Type(ac_slow);
807 }
808
809 const TypeFunc* OptoRuntime::generic_arraycopy_Type() {
810 // This signature is like System.arraycopy, except that it returns status.
811 return make_arraycopy_Type(ac_generic);
812 }
813
814
815 const TypeFunc* OptoRuntime::array_fill_Type() {
816 const Type** fields;
817 int argp = TypeFunc::Parms;
818 // create input type (domain): pointer, int, size_t
819 fields = TypeTuple::fields(3 LP64_ONLY( + 1));
820 fields[argp++] = TypePtr::NOTNULL;
821 fields[argp++] = TypeInt::INT;
822 fields[argp++] = TypeX_X; // size in whatevers (size_t)
823 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
824 const TypeTuple *domain = TypeTuple::make(argp, fields);
825
826 // create result type
827 fields = TypeTuple::fields(1);
828 fields[TypeFunc::Parms+0] = NULL; // void
829 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
830
831 return TypeFunc::make(domain, range);
832 }
833
834 // for aescrypt encrypt/decrypt operations, just three pointers returning void (length is constant)
835 const TypeFunc* OptoRuntime::aescrypt_block_Type() {
836 // create input type (domain)
837 int num_args = 3;
838 if (Matcher::pass_original_key_for_aes()) {
839 num_args = 4;
840 }
841 int argcnt = num_args;
842 const Type** fields = TypeTuple::fields(argcnt);
843 int argp = TypeFunc::Parms;
844 fields[argp++] = TypePtr::NOTNULL; // src
845 fields[argp++] = TypePtr::NOTNULL; // dest
846 fields[argp++] = TypePtr::NOTNULL; // k array
847 if (Matcher::pass_original_key_for_aes()) {
848 fields[argp++] = TypePtr::NOTNULL; // original k array
849 }
850 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
851 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
852
853 // no result type needed
854 fields = TypeTuple::fields(1);
855 fields[TypeFunc::Parms+0] = NULL; // void
856 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
857 return TypeFunc::make(domain, range);
858 }
859
860 /**
861 * int updateBytesCRC32(int crc, byte* b, int len)
862 */
863 const TypeFunc* OptoRuntime::updateBytesCRC32_Type() {
864 // create input type (domain)
865 int num_args = 3;
866 int argcnt = num_args;
867 const Type** fields = TypeTuple::fields(argcnt);
868 int argp = TypeFunc::Parms;
869 fields[argp++] = TypeInt::INT; // crc
870 fields[argp++] = TypePtr::NOTNULL; // src
871 fields[argp++] = TypeInt::INT; // len
872 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
873 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
874
875 // result type needed
876 fields = TypeTuple::fields(1);
877 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
878 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
879 return TypeFunc::make(domain, range);
880 }
881
882 /**
883 * int updateBytesCRC32C(int crc, byte* buf, int len, int* table)
884 */
885 const TypeFunc* OptoRuntime::updateBytesCRC32C_Type() {
886 // create input type (domain)
887 int num_args = 4;
888 int argcnt = num_args;
889 const Type** fields = TypeTuple::fields(argcnt);
890 int argp = TypeFunc::Parms;
891 fields[argp++] = TypeInt::INT; // crc
892 fields[argp++] = TypePtr::NOTNULL; // buf
893 fields[argp++] = TypeInt::INT; // len
894 fields[argp++] = TypePtr::NOTNULL; // table
895 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
896 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
897
898 // result type needed
899 fields = TypeTuple::fields(1);
900 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
901 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
902 return TypeFunc::make(domain, range);
903 }
904
905 /**
906  * int updateBytesAdler32(int adler, byte* b, int off, int len)
907 */
908 const TypeFunc* OptoRuntime::updateBytesAdler32_Type() {
909 // create input type (domain)
910 int num_args = 3;
911 int argcnt = num_args;
912 const Type** fields = TypeTuple::fields(argcnt);
913 int argp = TypeFunc::Parms;
914   fields[argp++] = TypeInt::INT;        // adler
915 fields[argp++] = TypePtr::NOTNULL; // src + offset
916 fields[argp++] = TypeInt::INT; // len
917 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
918 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
919
920 // result type needed
921 fields = TypeTuple::fields(1);
922   fields[TypeFunc::Parms+0] = TypeInt::INT; // adler result
923 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
924 return TypeFunc::make(domain, range);
925 }
926
927 // for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning int
928 const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
929 // create input type (domain)
930 int num_args = 5;
931 if (Matcher::pass_original_key_for_aes()) {
932 num_args = 6;
933 }
934 int argcnt = num_args;
935 const Type** fields = TypeTuple::fields(argcnt);
936 int argp = TypeFunc::Parms;
937 fields[argp++] = TypePtr::NOTNULL; // src
938 fields[argp++] = TypePtr::NOTNULL; // dest
939 fields[argp++] = TypePtr::NOTNULL; // k array
940 fields[argp++] = TypePtr::NOTNULL; // r array
941 fields[argp++] = TypeInt::INT; // src len
942 if (Matcher::pass_original_key_for_aes()) {
943 fields[argp++] = TypePtr::NOTNULL; // original k array
944 }
945 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
946 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
947
948 // returning cipher len (int)
949 fields = TypeTuple::fields(1);
950 fields[TypeFunc::Parms+0] = TypeInt::INT;
951 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
952 return TypeFunc::make(domain, range);
953 }
954
955 // for counterMode calls of aescrypt encrypt/decrypt, six pointers and a length, returning int
956 const TypeFunc* OptoRuntime::counterMode_aescrypt_Type() {
957 // create input type (domain)
958 int num_args = 7;
959 if (Matcher::pass_original_key_for_aes()) {
960 num_args = 8;
961 }
962 int argcnt = num_args;
963 const Type** fields = TypeTuple::fields(argcnt);
964 int argp = TypeFunc::Parms;
965 fields[argp++] = TypePtr::NOTNULL; // src
966 fields[argp++] = TypePtr::NOTNULL; // dest
967 fields[argp++] = TypePtr::NOTNULL; // k array
968 fields[argp++] = TypePtr::NOTNULL; // counter array
969 fields[argp++] = TypeInt::INT; // src len
970 fields[argp++] = TypePtr::NOTNULL; // saved_encCounter
971 fields[argp++] = TypePtr::NOTNULL; // saved used addr
972 if (Matcher::pass_original_key_for_aes()) {
973 fields[argp++] = TypePtr::NOTNULL; // original k array
974 }
975 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
976 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
977 // returning cipher len (int)
978 fields = TypeTuple::fields(1);
979 fields[TypeFunc::Parms + 0] = TypeInt::INT;
980 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
981 return TypeFunc::make(domain, range);
982 }
983
984 /*
985 * void implCompress(byte[] buf, int ofs)
986 */
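// (The stub signature below also carries the digest state array; the Java-level ofs
// is assumed to have been folded into the buf address by the calling intrinsic.)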
987 const TypeFunc* OptoRuntime::sha_implCompress_Type() {
988 // create input type (domain)
989 int num_args = 2;
990 int argcnt = num_args;
991 const Type** fields = TypeTuple::fields(argcnt);
992 int argp = TypeFunc::Parms;
993 fields[argp++] = TypePtr::NOTNULL; // buf
994 fields[argp++] = TypePtr::NOTNULL; // state
995 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
996 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
997
998 // no result type needed
999 fields = TypeTuple::fields(1);
1000 fields[TypeFunc::Parms+0] = NULL; // void
1001 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1002 return TypeFunc::make(domain, range);
1003 }
1004
1005 /*
1006 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
1007 */
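// (As above, the stub also receives the digest state array, which is why the domain
// below has four arguments rather than the three of the Java-level signature.)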
1008 const TypeFunc* OptoRuntime::digestBase_implCompressMB_Type() {
1009 // create input type (domain)
1010 int num_args = 4;
1011 int argcnt = num_args;
1012 const Type** fields = TypeTuple::fields(argcnt);
1013 int argp = TypeFunc::Parms;
1014 fields[argp++] = TypePtr::NOTNULL; // buf
1015 fields[argp++] = TypePtr::NOTNULL; // state
1016 fields[argp++] = TypeInt::INT; // ofs
1017 fields[argp++] = TypeInt::INT; // limit
1018 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1019 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1020
1021 // returning ofs (int)
1022 fields = TypeTuple::fields(1);
1023 fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
1024 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1025 return TypeFunc::make(domain, range);
1026 }
1027
1028 const TypeFunc* OptoRuntime::multiplyToLen_Type() {
1029 // create input type (domain)
1030 int num_args = 6;
1031 int argcnt = num_args;
1032 const Type** fields = TypeTuple::fields(argcnt);
1033 int argp = TypeFunc::Parms;
1034 fields[argp++] = TypePtr::NOTNULL; // x
1035 fields[argp++] = TypeInt::INT; // xlen
1036 fields[argp++] = TypePtr::NOTNULL; // y
1037 fields[argp++] = TypeInt::INT; // ylen
1038 fields[argp++] = TypePtr::NOTNULL; // z
1039 fields[argp++] = TypeInt::INT; // zlen
1040 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1041 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1042
1043 // no result type needed
1044 fields = TypeTuple::fields(1);
1045 fields[TypeFunc::Parms+0] = NULL;
1046 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1047 return TypeFunc::make(domain, range);
1048 }
1049
1050 const TypeFunc* OptoRuntime::squareToLen_Type() {
1051 // create input type (domain)
1052 int num_args = 4;
1053 int argcnt = num_args;
1054 const Type** fields = TypeTuple::fields(argcnt);
1055 int argp = TypeFunc::Parms;
1056 fields[argp++] = TypePtr::NOTNULL; // x
1057 fields[argp++] = TypeInt::INT; // len
1058 fields[argp++] = TypePtr::NOTNULL; // z
1059 fields[argp++] = TypeInt::INT; // zlen
1060 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1061 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1062
1063 // no result type needed
1064 fields = TypeTuple::fields(1);
1065 fields[TypeFunc::Parms+0] = NULL;
1066 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1067 return TypeFunc::make(domain, range);
1068 }
1069
1070 // for mulAdd calls, 2 pointers and 3 ints, returning int
1071 const TypeFunc* OptoRuntime::mulAdd_Type() {
1072 // create input type (domain)
1073 int num_args = 5;
1074 int argcnt = num_args;
1075 const Type** fields = TypeTuple::fields(argcnt);
1076 int argp = TypeFunc::Parms;
1077 fields[argp++] = TypePtr::NOTNULL; // out
1078 fields[argp++] = TypePtr::NOTNULL; // in
1079 fields[argp++] = TypeInt::INT; // offset
1080 fields[argp++] = TypeInt::INT; // len
1081 fields[argp++] = TypeInt::INT; // k
1082 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1083 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1084
1085 // returning carry (int)
1086 fields = TypeTuple::fields(1);
1087 fields[TypeFunc::Parms+0] = TypeInt::INT;
1088 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1089 return TypeFunc::make(domain, range);
1090 }
1091
1092 const TypeFunc* OptoRuntime::montgomeryMultiply_Type() {
1093 // create input type (domain)
1094 int num_args = 7;
1095 int argcnt = num_args;
1096 const Type** fields = TypeTuple::fields(argcnt);
1097 int argp = TypeFunc::Parms;
1098 fields[argp++] = TypePtr::NOTNULL; // a
1099 fields[argp++] = TypePtr::NOTNULL; // b
1100 fields[argp++] = TypePtr::NOTNULL; // n
1101 fields[argp++] = TypeInt::INT; // len
1102 fields[argp++] = TypeLong::LONG; // inv
1103 fields[argp++] = Type::HALF;
1104 fields[argp++] = TypePtr::NOTNULL; // result
1105 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1106 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1107
1108 // result type needed
1109 fields = TypeTuple::fields(1);
1110 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
1111
1112 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1113 return TypeFunc::make(domain, range);
1114 }
1115
1116 const TypeFunc* OptoRuntime::montgomerySquare_Type() {
1117 // create input type (domain)
1118 int num_args = 6;
1119 int argcnt = num_args;
1120 const Type** fields = TypeTuple::fields(argcnt);
1121 int argp = TypeFunc::Parms;
1122 fields[argp++] = TypePtr::NOTNULL; // a
1123 fields[argp++] = TypePtr::NOTNULL; // n
1124 fields[argp++] = TypeInt::INT; // len
1125 fields[argp++] = TypeLong::LONG; // inv
1126 fields[argp++] = Type::HALF;
1127 fields[argp++] = TypePtr::NOTNULL; // result
1128 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1129 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1130
1131 // result type needed
1132 fields = TypeTuple::fields(1);
1133 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
1134
1135 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1136 return TypeFunc::make(domain, range);
1137 }
1138
1139 const TypeFunc* OptoRuntime::vectorizedMismatch_Type() {
1140 // create input type (domain)
1141 int num_args = 4;
1142 int argcnt = num_args;
1143 const Type** fields = TypeTuple::fields(argcnt);
1144 int argp = TypeFunc::Parms;
1145 fields[argp++] = TypePtr::NOTNULL; // obja
1146 fields[argp++] = TypePtr::NOTNULL; // objb
1147 fields[argp++] = TypeInt::INT; // length, number of elements
1148 fields[argp++] = TypeInt::INT; // log2scale, element size
1149 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1150 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1151
1152 //return mismatch index (int)
1153 fields = TypeTuple::fields(1);
1154 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1155 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1156 return TypeFunc::make(domain, range);
1157 }
1158
1159 // GHASH block processing
1160 const TypeFunc* OptoRuntime::ghash_processBlocks_Type() {
1161 int argcnt = 4;
1162
1163 const Type** fields = TypeTuple::fields(argcnt);
1164 int argp = TypeFunc::Parms;
1165 fields[argp++] = TypePtr::NOTNULL; // state
1166 fields[argp++] = TypePtr::NOTNULL; // subkeyH
1167 fields[argp++] = TypePtr::NOTNULL; // data
1168 fields[argp++] = TypeInt::INT; // blocks
1169 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1170 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1171
1172 // result type needed
1173 fields = TypeTuple::fields(1);
1174 fields[TypeFunc::Parms+0] = NULL; // void
1175 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1176 return TypeFunc::make(domain, range);
1177 }
1178
1179 //------------- Interpreter state access for on stack replacement
1180 const TypeFunc* OptoRuntime::osr_end_Type() {
1181 // create input type (domain)
1182 const Type **fields = TypeTuple::fields(1);
1183 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
1184 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
1185
1186 // create result type
1187 fields = TypeTuple::fields(1);
1188 // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
1189 fields[TypeFunc::Parms+0] = NULL; // void
1190 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
1191 return TypeFunc::make(domain, range);
1192 }
1193
1194 //-------------- methodData update helpers
1195
1196 const TypeFunc* OptoRuntime::profile_receiver_type_Type() {
1197 // create input type (domain)
1198 const Type **fields = TypeTuple::fields(2);
1199 fields[TypeFunc::Parms+0] = TypeAryPtr::NOTNULL; // methodData pointer
1200 fields[TypeFunc::Parms+1] = TypeInstPtr::BOTTOM; // receiver oop
1201 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
1202
1203 // create result type
1204 fields = TypeTuple::fields(1);
1205 fields[TypeFunc::Parms+0] = NULL; // void
1206 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
1207 return TypeFunc::make(domain,range);
1208 }
1209
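// The raw cell arithmetic below mirrors the ReceiverTypeData layout: after
// DataLayout::header_size_in_cells() header cells, each row holds a
// (receiver klass, receiver count) pair addressed via receiver_cell_index(row) and
// receiver_count_cell_index(row).  The commented-out "vc->" lines show the typed
// equivalent of each raw update.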
1210 JRT_LEAF(void, OptoRuntime::profile_receiver_type_C(DataLayout* data, oopDesc* receiver))
1211 if (receiver == NULL) return;
1212 Klass* receiver_klass = receiver->klass();
1213
1214 intptr_t* mdp = ((intptr_t*)(data)) + DataLayout::header_size_in_cells();
1215 int empty_row = -1; // free row, if any is encountered
1216
1217 // ReceiverTypeData* vc = new ReceiverTypeData(mdp);
1218 for (uint row = 0; row < ReceiverTypeData::row_limit(); row++) {
1219 // if (vc->receiver(row) == receiver_klass)
1220 int receiver_off = ReceiverTypeData::receiver_cell_index(row);
1221 intptr_t row_recv = *(mdp + receiver_off);
1222 if (row_recv == (intptr_t) receiver_klass) {
1223 // vc->set_receiver_count(row, vc->receiver_count(row) + DataLayout::counter_increment);
1224 int count_off = ReceiverTypeData::receiver_count_cell_index(row);
1225 *(mdp + count_off) += DataLayout::counter_increment;
1226 return;
1227 } else if (row_recv == 0) {
1228 // else if (vc->receiver(row) == NULL)
1229 empty_row = (int) row;
1230 }
1231 }
1232
1233 if (empty_row != -1) {
1234 int receiver_off = ReceiverTypeData::receiver_cell_index(empty_row);
1235 // vc->set_receiver(empty_row, receiver_klass);
1236 *(mdp + receiver_off) = (intptr_t) receiver_klass;
1237 // vc->set_receiver_count(empty_row, DataLayout::counter_increment);
1238 int count_off = ReceiverTypeData::receiver_count_cell_index(empty_row);
1239 *(mdp + count_off) = DataLayout::counter_increment;
1240 } else {
1241 // Receiver did not match any saved receiver and there is no empty row for it.
1242 // Increment total counter to indicate polymorphic case.
1243 intptr_t* count_p = (intptr_t*)(((uint8_t*)(data)) + in_bytes(CounterData::count_offset()));
1244 *count_p += DataLayout::counter_increment;
1245 }
1246 JRT_END
1247
1248 //-------------------------------------------------------------------------------------
1249 // register policy
1250
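// register_save_policy is generated by the ADLC from the platform .ad file; the
// letters stand for C = save-on-call (SOC, caller-saved), E = save-on-entry
// (SOE, callee-saved), N = not-saved (NS), and A = always-saved (AS).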
1251 bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
1252 assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
1253 switch (register_save_policy[reg]) {
1254 case 'C': return false; //SOC
1255 case 'E': return true ; //SOE
1256 case 'N': return false; //NS
1257 case 'A': return false; //AS
1258 }
1259 ShouldNotReachHere();
1260 return false;
1261 }
1262
1263 //-----------------------------------------------------------------------
1264 // Exceptions
1265 //
1266
1267 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);
1268
1269 // This method is an entry point that is always called by a C++ method, not
1270 // directly from compiled code. Compiled code calls the C++ method that follows (handle_exception_C).
1271 // We can't allow async exception to be installed during exception processing.
1272 JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* thread, nmethod* &nm))
1273
1274 // Do not confuse exception_oop with pending_exception. The exception_oop
1275 // is only used to pass arguments into the method. Not for general
1276 // exception handling. DO NOT CHANGE IT to use pending_exception, since
1277   // the runtime stubs check this on exit.
1278 assert(thread->exception_oop() != NULL, "exception oop is found");
1279 address handler_address = NULL;
1280
1281 Handle exception(thread, thread->exception_oop());
1282 address pc = thread->exception_pc();
1283
1284 // Clear out the exception oop and pc since looking up an
1285 // exception handler can cause class loading, which might throw an
1286 // exception and those fields are expected to be clear during
1287 // normal bytecode execution.
1288 thread->clear_exception_oop_and_pc();
1289
1290 LogTarget(Info, exceptions) lt;
1291 if (lt.is_enabled()) {
1292 ResourceMark rm;
1293 LogStream ls(lt);
1294 trace_exception(&ls, exception(), pc, "");
1295 }
1296
1297 // for AbortVMOnException flag
1298 Exceptions::debug_check_abort(exception);
1299
1300 #ifdef ASSERT
1301 if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
1302 // should throw an exception here
1303 ShouldNotReachHere();
1304 }
1305 #endif
1306
1307 // new exception handling: this method is entered only from adapters
1308 // exceptions from compiled java methods are handled in compiled code
1309 // using rethrow node
1310
1311 nm = CodeCache::find_nmethod(pc);
1312 assert(nm != NULL, "No NMethod found");
1313 if (nm->is_native_method()) {
1314 fatal("Native method should not have path to exception handling");
1315 } else {
1316 // we are switching to old paradigm: search for exception handler in caller_frame
1317 // instead in exception handler of caller_frame.sender()
1318
1319 if (JvmtiExport::can_post_on_exceptions()) {
1320 // "Full-speed catching" is not necessary here,
1321 // since we're notifying the VM on every catch.
1322 // Force deoptimization and the rest of the lookup
1323 // will be fine.
1324 deoptimize_caller_frame(thread);
1325 }
1326
1327 // Check the stack guard pages. If enabled, look for handler in this frame;
1328 // otherwise, forcibly unwind the frame.
1329 //
1330 // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
1331 bool force_unwind = !thread->reguard_stack();
1332 bool deopting = false;
1333 if (nm->is_deopt_pc(pc)) {
1334 deopting = true;
1335 RegisterMap map(thread, false);
1336 frame deoptee = thread->last_frame().sender(&map);
1337 assert(deoptee.is_deoptimized_frame(), "must be deopted");
1338 // Adjust the pc back to the original throwing pc
1339 pc = deoptee.pc();
1340 }
1341
1342 // If we are forcing an unwind because of stack overflow then deopt is
1343 // irrelevant since we are throwing the frame away anyway.
1344
1345 if (deopting && !force_unwind) {
1346 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
1347 } else {
1348
1349 handler_address =
1350 force_unwind ? NULL : nm->handler_for_exception_and_pc(exception, pc);
1351
1352 if (handler_address == NULL) {
1353 bool recursive_exception = false;
1354 handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
1355 assert (handler_address != NULL, "must have compiled handler");
1356 // Update the exception cache only when the unwind was not forced
1357         // and no other exception occurred during the computation of the
1358 // compiled exception handler. Checking for exception oop equality is not
1359 // sufficient because some exceptions are pre-allocated and reused.
1360 if (!force_unwind && !recursive_exception) {
1361 nm->add_handler_for_exception_and_pc(exception,pc,handler_address);
1362 }
1363 } else {
1364 #ifdef ASSERT
1365 bool recursive_exception = false;
1366 address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
1367 vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
1368 p2i(handler_address), p2i(computed_address));
1369 #endif
1370 }
1371 }
1372
1373 thread->set_exception_pc(pc);
1374 thread->set_exception_handler_pc(handler_address);
1375
1376 // Check if the exception PC is a MethodHandle call site.
1377 thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
1378 }
1379
1380 // Restore correct return pc. Was saved above.
1381 thread->set_exception_oop(exception());
1382 return handler_address;
1383
1384 JRT_END
1385
1386 // We are entering here from exception_blob
1387 // If there is a compiled exception handler in this method, we will continue there;
1388 // otherwise we will unwind the stack and continue at the caller of top frame method
1389 // Note we enter without the usual JRT wrapper. We will call a helper routine that
1390 // will do the normal VM entry. We do it this way so that we can see if the nmethod
1391 // we looked up the handler for has been deoptimized in the meantime. If it has been
1392 // we must not use the handler and instead return the deopt blob.
1393 address OptoRuntime::handle_exception_C(JavaThread* thread) {
1394 //
1395   // We are in Java code, not the VM, and in debug mode we have a NoHandleMark
1396 // We are in Java, not the VM, and in debug mode we have a NoHandleMark
1397 #ifndef PRODUCT
1398 SharedRuntime::_find_handler_ctr++; // find exception handler
1399 #endif
1400 debug_only(NoHandleMark __hm;)
1401 nmethod* nm = NULL;
1402 address handler_address = NULL;
1403 {
1404 // Enter the VM
1405
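    // ResetNoHandleMark temporarily lifts the NoHandleMark above so that the
    // helper can create handles while it is inside the VM.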
1406 ResetNoHandleMark rnhm;
1407 handler_address = handle_exception_C_helper(thread, nm);
1408 }
1409
1410 // Back in java: Use no oops, DON'T safepoint
1411
1412   // Now check to see if the handler we are returning is in a frame that
1413   // has since been deoptimized
1414
1415 if (nm != NULL) {
1416 RegisterMap map(thread, false);
1417 frame caller = thread->last_frame().sender(&map);
1418 #ifdef ASSERT
1419 assert(caller.is_compiled_frame(), "must be");
1420 #endif // ASSERT
1421 if (caller.is_deoptimized_frame()) {
1422 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
1423 }
1424 }
1425 return handler_address;
1426 }
1427
1428 //------------------------------rethrow----------------------------------------
1429 // We get here after compiled code has executed a 'RethrowNode'. The callee
1430 // is either throwing or rethrowing an exception. The callee-save registers
1431 // have been restored, synchronized objects have been unlocked and the callee
1432 // stack frame has been removed. The return address was passed in.
1433 // Exception oop is passed as the 1st argument. This routine is then called
1434 // from the stub. On exit, we know where to jump in the caller's code.
1435 // After this C code exits, the stub will pop its frame and end in a jump
1436 // (instead of a return). We enter the caller's default handler.
1437 //
1438 // This must be JRT_LEAF:
1439 // - caller will not change its state as we cannot block on exit,
1440 // therefore raw_exception_handler_for_return_address is all it takes
1441 // to handle deoptimized blobs
1442 //
1443 // However, there needs to be a safepoint check in the middle! So compiled
1444 // safepoints are completely watertight.
1445 //
1446 // Thus, it cannot be a leaf since it contains the NoGCVerifier.
1447 //
1448 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
1449 //
1450 address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
1451 #ifndef PRODUCT
1452 SharedRuntime::_rethrow_ctr++; // count rethrows
1453 #endif
1454   assert (exception != NULL, "should have thrown a NullPointerException");
1455 #ifdef ASSERT
1456 if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
1457 // should throw an exception here
1458 ShouldNotReachHere();
1459 }
1460 #endif
1461
1462 thread->set_vm_result(exception);
1463 // Frame not compiled (handles deoptimization blob)
1464 return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
1465 }
1466
1467
1468 const TypeFunc *OptoRuntime::rethrow_Type() {
1469 // create input type (domain)
1470 const Type **fields = TypeTuple::fields(1);
1471 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
1472 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
1473
1474 // create result type (range)
1475 fields = TypeTuple::fields(1);
1476 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
1477 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1478
1479 return TypeFunc::make(domain, range);
1480 }
1481
1482
1483 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
1484 // Deoptimize the caller before continuing, as the compiled
1485 // exception handler table may not be valid.
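  // With StressCompiledExceptionHandlers set, skip the deoptimization so the
  // compiled exception handler path itself gets exercised.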
1486 if (!StressCompiledExceptionHandlers && doit) {
1487 deoptimize_caller_frame(thread);
1488 }
1489 }
1490
1491 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
1492 // Called from within the owner thread, so no need for safepoint
1493 RegisterMap reg_map(thread);
1494 frame stub_frame = thread->last_frame();
1495 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1496   frame caller_frame = stub_frame.sender(&reg_map);
1497
1498 // Deoptimize the caller frame.
1499 Deoptimization::deoptimize_frame(thread, caller_frame.id());
1500 }
1501
1502
1503 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
1504 // Called from within the owner thread, so no need for safepoint
1505 RegisterMap reg_map(thread);
1506 frame stub_frame = thread->last_frame();
1507 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1508   frame caller_frame = stub_frame.sender(&reg_map);
1509 return caller_frame.is_deoptimized_frame();
1510 }
1511
1512
1513 const TypeFunc *OptoRuntime::register_finalizer_Type() {
1514 // create input type (domain)
1515 const Type **fields = TypeTuple::fields(1);
1516 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
1517 // // The JavaThread* is passed to each routine as the last argument
1518 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
1519 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
1520
1521 // create result type (range)
1522 fields = TypeTuple::fields(0);
1523
1524 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1525
1526 return TypeFunc::make(domain,range);
1527 }
1528
1529
1530 //-----------------------------------------------------------------------------
1531 // Dtrace support. entry and exit probes have the same signature
1532 const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() {
1533 // create input type (domain)
1534 const Type **fields = TypeTuple::fields(2);
1535 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1536 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
1537 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1538
1539 // create result type (range)
1540 fields = TypeTuple::fields(0);
1541
1542 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1543
1544 return TypeFunc::make(domain,range);
1545 }
1546
1547 const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() {
1548 // create input type (domain)
1549 const Type **fields = TypeTuple::fields(2);
1550 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1551 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object
1552
1553 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1554
1555 // create result type (range)
1556 fields = TypeTuple::fields(0);
1557
1558 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1559
1560 return TypeFunc::make(domain,range);
1561 }
1562
1563
1564 JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer(oopDesc* obj, JavaThread* thread))
1565 assert(oopDesc::is_oop(obj), "must be a valid oop");
1566 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1567 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1568 JRT_END
1569
1570 //-----------------------------------------------------------------------------
1571
1572 NamedCounter * volatile OptoRuntime::_named_counters = NULL;
1573
1574 //
1575 // dump the collected NamedCounters.
1576 //
1577 void OptoRuntime::print_named_counters() {
1578 int total_lock_count = 0;
1579 int eliminated_lock_count = 0;
1580
1581 NamedCounter* c = _named_counters;
1582 while (c) {
1583 if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
1584 int count = c->count();
1585 if (count > 0) {
1586 bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
1587 if (Verbose) {
1588 tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
1589 }
1590 total_lock_count += count;
1591 if (eliminated) {
1592 eliminated_lock_count += count;
1593 }
1594 }
1595 } else if (c->tag() == NamedCounter::BiasedLockingCounter) {
1596 BiasedLockingCounters* blc = ((BiasedLockingNamedCounter*)c)->counters();
1597 if (blc->nonzero()) {
1598 tty->print_cr("%s", c->name());
1599 blc->print_on(tty);
1600 }
1601 #if INCLUDE_RTM_OPT
1602 } else if (c->tag() == NamedCounter::RTMLockingCounter) {
1603 RTMLockingCounters* rlc = ((RTMLockingNamedCounter*)c)->counters();
1604 if (rlc->nonzero()) {
1605 tty->print_cr("%s", c->name());
1606 rlc->print_on(tty);
1607 }
1608 #endif
1609 }
1610 c = c->next();
1611 }
1612 if (total_lock_count > 0) {
1613 tty->print_cr("dynamic locks: %d", total_lock_count);
1614 if (eliminated_lock_count) {
1615 tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
1616 (int)(eliminated_lock_count * 100.0 / total_lock_count));
1617 }
1618 }
1619 }
1620
1621 //
1622 // Allocate a new NamedCounter. The JVMState is used to generate the
1623 // name which consists of method@bci entries for the inlining tree.
1624 //
1625
1626 NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
1627 int max_depth = youngest_jvms->depth();
1628
1629 // Visit scopes from youngest to oldest.
1630 bool first = true;
1631 stringStream st;
1632 for (int depth = max_depth; depth >= 1; depth--) {
1633 JVMState* jvms = youngest_jvms->of_depth(depth);
1634 ciMethod* m = jvms->has_method() ? jvms->method() : NULL;
1635 if (!first) {
1636 st.print(" ");
1637 } else {
1638 first = false;
1639 }
1640 int bci = jvms->bci();
1641 if (bci < 0) bci = 0;
1642 st.print("%s.%s@%d", m->holder()->name()->as_utf8(), m->name()->as_utf8(), bci);
1643 // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
1644 }
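  // The resulting name lists scopes from the youngest (innermost) scope to the
  // oldest, e.g. something like "java/lang/String.hashCode@11 Foo.bar@3" when a
  // callee has been inlined.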
1645 NamedCounter* c;
1646 if (tag == NamedCounter::BiasedLockingCounter) {
1647 c = new BiasedLockingNamedCounter(st.as_string());
1648 } else if (tag == NamedCounter::RTMLockingCounter) {
1649 c = new RTMLockingNamedCounter(st.as_string());
1650 } else {
1651 c = new NamedCounter(st.as_string(), tag);
1652 }
1653
1654 // atomically add the new counter to the head of the list. We only
1655 // add counters so this is safe.
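  // The cmpxchg below publishes the new counter as the list head only if no
  // other thread has prepended a counter since head was read; otherwise we
  // retry with the updated head.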
1656 NamedCounter* head;
1657 do {
1658 c->set_next(NULL);
1659 head = _named_counters;
1660 c->set_next(head);
1661 } while (Atomic::cmpxchg_ptr(c, &_named_counters, head) != head);
1662 return c;
1663 }
1664
1665 int trace_exception_counter = 0;
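// Print a one-line diagnostic describing the exception, the code blob
// containing the throwing pc, and the pc itself, to the given stream.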
1666 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
1667 trace_exception_counter++;
1668 stringStream tempst;
1669
1670 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
1671 exception_oop->print_value_on(&tempst);
1672 tempst.print(" in ");
1673 CodeBlob* blob = CodeCache::find_blob(exception_pc);
1674 if (blob->is_compiled()) {
1675 CompiledMethod* cm = blob->as_compiled_method_or_null();
1676 cm->method()->print_value_on(&tempst);
1677 } else if (blob->is_runtime_stub()) {
1678 tempst.print("<runtime-stub>");
1679 } else {
1680 tempst.print("<unknown>");
1681 }
1682 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
1683 tempst.print("]");
1684
1685 st->print_raw_cr(tempst.as_string());
1686 }
--- EOF ---