1 /* 2 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/systemDictionary.hpp" 27 #include "classfile/vmSymbols.hpp" 28 #include "code/codeCache.hpp" 29 #include "code/compiledIC.hpp" 30 #include "code/icBuffer.hpp" 31 #include "code/nmethod.hpp" 32 #include "code/pcDesc.hpp" 33 #include "code/scopeDesc.hpp" 34 #include "code/vtableStubs.hpp" 35 #include "compiler/compileBroker.hpp" 36 #include "compiler/oopMap.hpp" 37 #include "gc/g1/heapRegion.hpp" 38 #include "gc/shared/barrierSet.hpp" 39 #include "gc/shared/collectedHeap.hpp" 40 #include "gc/shared/gcLocker.inline.hpp" 41 #include "interpreter/bytecode.hpp" 42 #include "interpreter/interpreter.hpp" 43 #include "interpreter/linkResolver.hpp" 44 #include "logging/log.hpp" 45 #include "logging/logStream.hpp" 46 #include "memory/oopFactory.hpp" 47 #include "memory/resourceArea.hpp" 48 #include "oops/objArrayKlass.hpp" 49 #include "oops/oop.inline.hpp" 50 #include "oops/typeArrayOop.inline.hpp" 51 #include "opto/ad.hpp" 52 #include "opto/addnode.hpp" 53 #include "opto/callnode.hpp" 54 #include "opto/cfgnode.hpp" 55 #include "opto/graphKit.hpp" 56 #include "opto/machnode.hpp" 57 #include "opto/matcher.hpp" 58 #include "opto/memnode.hpp" 59 #include "opto/mulnode.hpp" 60 #include "opto/runtime.hpp" 61 #include "opto/subnode.hpp" 62 #include "runtime/atomic.hpp" 63 #include "runtime/handles.inline.hpp" 64 #include "runtime/interfaceSupport.inline.hpp" 65 #include "runtime/javaCalls.hpp" 66 #include "runtime/sharedRuntime.hpp" 67 #include "runtime/signature.hpp" 68 #include "runtime/threadCritical.hpp" 69 #include "runtime/vframe.hpp" 70 #include "runtime/vframeArray.hpp" 71 #include "runtime/vframe_hp.hpp" 72 #include "utilities/copy.hpp" 73 #include "utilities/preserveException.hpp" 74 75 76 // For debugging purposes: 77 // To force FullGCALot inside a runtime function, add the following two lines 78 // 79 // Universe::release_fullgc_alot_dummy(); 80 // MarkSweep::invoke(0, "Debugging"); 81 // 82 // At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000 83 84 85 86 87 // Compiled code entry points 88 address OptoRuntime::_new_instance_Java = NULL; 89 address OptoRuntime::_new_array_Java = NULL; 90 address OptoRuntime::_new_array_nozero_Java = NULL; 91 address OptoRuntime::_multianewarray2_Java = NULL; 92 address OptoRuntime::_multianewarray3_Java = NULL; 93 address OptoRuntime::_multianewarray4_Java = NULL; 94 address OptoRuntime::_multianewarray5_Java = NULL; 95 address OptoRuntime::_multianewarrayN_Java = NULL; 96 address OptoRuntime::_g1_wb_pre_Java = NULL; 97 address OptoRuntime::_g1_wb_post_Java = NULL; 98 address OptoRuntime::_vtable_must_compile_Java = NULL; 99 address OptoRuntime::_complete_monitor_locking_Java = NULL; 100 address OptoRuntime::_monitor_notify_Java = NULL; 101 address OptoRuntime::_monitor_notifyAll_Java = NULL; 102 address OptoRuntime::_rethrow_Java = NULL; 103 104 address OptoRuntime::_slow_arraycopy_Java = NULL; 105 address OptoRuntime::_register_finalizer_Java = NULL; 106 107 ExceptionBlob* OptoRuntime::_exception_blob; 108 109 // This should be called in an assertion at the start of OptoRuntime routines 110 // which are entered from compiled code (all of them) 111 #ifdef ASSERT 112 static bool check_compiled_frame(JavaThread* thread) { 113 assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code"); 114 RegisterMap map(thread, false); 115 frame caller = thread->last_frame().sender(&map); 116 assert(caller.is_compiled_frame(), "not being called from compiled like code"); 117 return true; 118 } 119 #endif // ASSERT 120 121 122 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \ 123 var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc); \ 124 if (var == NULL) { return false; } 125 126 bool OptoRuntime::generate(ciEnv* env) { 127 128 generate_exception_blob(); 129 130 // Note: tls: Means fetching the return oop out of the thread-local storage 131 // 132 // variable/name type-function-gen , runtime method ,fncy_jp, tls,save_args,retpc 133 // ------------------------------------------------------------------------------------------------------------------------------- 134 gen(env, _new_instance_Java , new_instance_Type , new_instance_C , 0 , true , false, false); 135 gen(env, _new_array_Java , new_array_Type , new_array_C , 0 , true , false, false); 136 gen(env, _new_array_nozero_Java , new_array_Type , new_array_nozero_C , 0 , true , false, false); 137 gen(env, _multianewarray2_Java , multianewarray2_Type , multianewarray2_C , 0 , true , false, false); 138 gen(env, _multianewarray3_Java , multianewarray3_Type , multianewarray3_C , 0 , true , false, false); 139 gen(env, _multianewarray4_Java , multianewarray4_Type , multianewarray4_C , 0 , true , false, false); 140 gen(env, _multianewarray5_Java , multianewarray5_Type , multianewarray5_C , 0 , true , false, false); 141 gen(env, _multianewarrayN_Java , multianewarrayN_Type , multianewarrayN_C , 0 , true , false, false); 142 gen(env, _g1_wb_pre_Java , g1_wb_pre_Type , SharedRuntime::g1_wb_pre , 0 , false, false, false); 143 gen(env, _g1_wb_post_Java , g1_wb_post_Type , SharedRuntime::g1_wb_post , 0 , false, false, false); 144 gen(env, _complete_monitor_locking_Java , complete_monitor_enter_Type , SharedRuntime::complete_monitor_locking_C, 0, false, false, false); 145 gen(env, _monitor_notify_Java , monitor_notify_Type , monitor_notify_C , 0 , false, false, false); 146 gen(env, _monitor_notifyAll_Java , monitor_notify_Type , monitor_notifyAll_C , 0 , false, false, false); 147 gen(env, _rethrow_Java , rethrow_Type , rethrow_C , 2 , true , false, true ); 148 149 gen(env, _slow_arraycopy_Java , slow_arraycopy_Type , SharedRuntime::slow_arraycopy_C , 0 , false, false, false); 150 gen(env, _register_finalizer_Java , register_finalizer_Type , register_finalizer , 0 , false, false, false); 151 152 return true; 153 } 154 155 #undef gen 156 157 158 // Helper method to do generation of RunTimeStub's 159 address OptoRuntime::generate_stub( ciEnv* env, 160 TypeFunc_generator gen, address C_function, 161 const char *name, int is_fancy_jump, 162 bool pass_tls, 163 bool save_argument_registers, 164 bool return_pc) { 165 166 // Matching the default directive, we currently have no method to match. 167 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization)); 168 ResourceMark rm; 169 Compile C( env, gen, C_function, name, is_fancy_jump, pass_tls, save_argument_registers, return_pc, directive); 170 DirectivesStack::release(directive); 171 return C.stub_entry_point(); 172 } 173 174 const char* OptoRuntime::stub_name(address entry) { 175 #ifndef PRODUCT 176 CodeBlob* cb = CodeCache::find_blob(entry); 177 RuntimeStub* rs =(RuntimeStub *)cb; 178 assert(rs != NULL && rs->is_runtime_stub(), "not a runtime stub"); 179 return rs->name(); 180 #else 181 // Fast implementation for product mode (maybe it should be inlined too) 182 return "runtime stub"; 183 #endif 184 } 185 186 187 //============================================================================= 188 // Opto compiler runtime routines 189 //============================================================================= 190 191 192 //=============================allocation====================================== 193 // We failed the fast-path allocation. Now we need to do a scavenge or GC 194 // and try allocation again. 195 196 // object allocation 197 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* thread)) 198 JRT_BLOCK; 199 #ifndef PRODUCT 200 SharedRuntime::_new_instance_ctr++; // new instance requires GC 201 #endif 202 assert(check_compiled_frame(thread), "incorrect caller"); 203 JvmtiSampledObjectAllocEventCollector collector; 204 205 // These checks are cheap to make and support reflective allocation. 206 int lh = klass->layout_helper(); 207 if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) { 208 Handle holder(THREAD, klass->klass_holder()); // keep the klass alive 209 klass->check_valid_for_instantiation(false, THREAD); 210 if (!HAS_PENDING_EXCEPTION) { 211 InstanceKlass::cast(klass)->initialize(THREAD); 212 } 213 } 214 215 if (!HAS_PENDING_EXCEPTION) { 216 // Scavenge and allocate an instance. 217 Handle holder(THREAD, klass->klass_holder()); // keep the klass alive 218 oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD); 219 thread->set_vm_result(result); 220 221 // Pass oops back through thread local storage. Our apparent type to Java 222 // is that we return an oop, but we can block on exit from this routine and 223 // a GC can trash the oop in C's return register. The generated stub will 224 // fetch the oop from TLS after any possible GC. 225 } 226 227 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION); 228 JRT_BLOCK_END; 229 230 // inform GC that we won't do card marks for initializing writes. 231 SharedRuntime::on_slowpath_allocation_exit(thread); 232 JRT_END 233 234 235 // array allocation 236 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread *thread)) 237 JRT_BLOCK; 238 #ifndef PRODUCT 239 SharedRuntime::_new_array_ctr++; // new array requires GC 240 #endif 241 assert(check_compiled_frame(thread), "incorrect caller"); 242 JvmtiSampledObjectAllocEventCollector collector; 243 244 // Scavenge and allocate an instance. 245 oop result; 246 247 if (array_type->is_typeArray_klass()) { 248 // The oopFactory likes to work with the element type. 249 // (We could bypass the oopFactory, since it doesn't add much value.) 250 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type(); 251 result = oopFactory::new_typeArray(elem_type, len, THREAD); 252 } else { 253 // Although the oopFactory likes to work with the elem_type, 254 // the compiler prefers the array_type, since it must already have 255 // that latter value in hand for the fast path. 256 Handle holder(THREAD, array_type->klass_holder()); // keep the array klass alive 257 Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass(); 258 result = oopFactory::new_objArray(elem_type, len, THREAD); 259 } 260 261 // Pass oops back through thread local storage. Our apparent type to Java 262 // is that we return an oop, but we can block on exit from this routine and 263 // a GC can trash the oop in C's return register. The generated stub will 264 // fetch the oop from TLS after any possible GC. 265 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION); 266 thread->set_vm_result(result); 267 JRT_BLOCK_END; 268 269 // inform GC that we won't do card marks for initializing writes. 270 SharedRuntime::on_slowpath_allocation_exit(thread); 271 JRT_END 272 273 // array allocation without zeroing 274 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread *thread)) 275 JRT_BLOCK; 276 #ifndef PRODUCT 277 SharedRuntime::_new_array_ctr++; // new array requires GC 278 #endif 279 assert(check_compiled_frame(thread), "incorrect caller"); 280 281 // Scavenge and allocate an instance. 282 oop result; 283 284 assert(array_type->is_typeArray_klass(), "should be called only for type array"); 285 // The oopFactory likes to work with the element type. 286 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type(); 287 result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD); 288 289 // Pass oops back through thread local storage. Our apparent type to Java 290 // is that we return an oop, but we can block on exit from this routine and 291 // a GC can trash the oop in C's return register. The generated stub will 292 // fetch the oop from TLS after any possible GC. 293 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION); 294 thread->set_vm_result(result); 295 JRT_BLOCK_END; 296 297 298 // inform GC that we won't do card marks for initializing writes. 299 SharedRuntime::on_slowpath_allocation_exit(thread); 300 301 oop result = thread->vm_result(); 302 if ((len > 0) && (result != NULL) && 303 is_deoptimized_caller_frame(thread)) { 304 // Zero array here if the caller is deoptimized. 305 int size = ((typeArrayOop)result)->object_size(); 306 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type(); 307 const size_t hs = arrayOopDesc::header_size(elem_type); 308 // Align to next 8 bytes to avoid trashing arrays's length. 309 const size_t aligned_hs = align_object_offset(hs); 310 HeapWord* obj = (HeapWord*)result; 311 if (aligned_hs > hs) { 312 Copy::zero_to_words(obj+hs, aligned_hs-hs); 313 } 314 // Optimized zeroing. 315 Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs); 316 } 317 318 JRT_END 319 320 // Note: multianewarray for one dimension is handled inline by GraphKit::new_array. 321 322 // multianewarray for 2 dimensions 323 JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread *thread)) 324 JvmtiSampledObjectAllocEventCollector collector; 325 #ifndef PRODUCT 326 SharedRuntime::_multi2_ctr++; // multianewarray for 1 dimension 327 #endif 328 assert(check_compiled_frame(thread), "incorrect caller"); 329 assert(elem_type->is_klass(), "not a class"); 330 jint dims[2]; 331 dims[0] = len1; 332 dims[1] = len2; 333 Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive 334 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD); 335 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION); 336 thread->set_vm_result(obj); 337 JRT_END 338 339 // multianewarray for 3 dimensions 340 JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread *thread)) 341 JvmtiSampledObjectAllocEventCollector collector; 342 #ifndef PRODUCT 343 SharedRuntime::_multi3_ctr++; // multianewarray for 1 dimension 344 #endif 345 assert(check_compiled_frame(thread), "incorrect caller"); 346 assert(elem_type->is_klass(), "not a class"); 347 jint dims[3]; 348 dims[0] = len1; 349 dims[1] = len2; 350 dims[2] = len3; 351 Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive 352 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD); 353 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION); 354 thread->set_vm_result(obj); 355 JRT_END 356 357 // multianewarray for 4 dimensions 358 JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread *thread)) 359 JvmtiSampledObjectAllocEventCollector collector; 360 #ifndef PRODUCT 361 SharedRuntime::_multi4_ctr++; // multianewarray for 1 dimension 362 #endif 363 assert(check_compiled_frame(thread), "incorrect caller"); 364 assert(elem_type->is_klass(), "not a class"); 365 jint dims[4]; 366 dims[0] = len1; 367 dims[1] = len2; 368 dims[2] = len3; 369 dims[3] = len4; 370 Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive 371 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD); 372 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION); 373 thread->set_vm_result(obj); 374 JRT_END 375 376 // multianewarray for 5 dimensions 377 JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread *thread)) 378 JvmtiSampledObjectAllocEventCollector collector; 379 #ifndef PRODUCT 380 SharedRuntime::_multi5_ctr++; // multianewarray for 1 dimension 381 #endif 382 assert(check_compiled_frame(thread), "incorrect caller"); 383 assert(elem_type->is_klass(), "not a class"); 384 jint dims[5]; 385 dims[0] = len1; 386 dims[1] = len2; 387 dims[2] = len3; 388 dims[3] = len4; 389 dims[4] = len5; 390 Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive 391 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD); 392 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION); 393 thread->set_vm_result(obj); 394 JRT_END 395 396 JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread *thread)) 397 assert(check_compiled_frame(thread), "incorrect caller"); 398 assert(elem_type->is_klass(), "not a class"); 399 assert(oop(dims)->is_typeArray(), "not an array"); 400 401 ResourceMark rm; 402 jint len = dims->length(); 403 assert(len > 0, "Dimensions array should contain data"); 404 jint *j_dims = typeArrayOop(dims)->int_at_addr(0); 405 jint *c_dims = NEW_RESOURCE_ARRAY(jint, len); 406 Copy::conjoint_jints_atomic(j_dims, c_dims, len); 407 408 Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive 409 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD); 410 deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION); 411 thread->set_vm_result(obj); 412 JRT_END 413 414 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread *thread)) 415 416 // Very few notify/notifyAll operations find any threads on the waitset, so 417 // the dominant fast-path is to simply return. 418 // Relatedly, it's critical that notify/notifyAll be fast in order to 419 // reduce lock hold times. 420 if (!SafepointSynchronize::is_synchronizing()) { 421 if (ObjectSynchronizer::quick_notify(obj, thread, false)) { 422 return; 423 } 424 } 425 426 // This is the case the fast-path above isn't provisioned to handle. 427 // The fast-path is designed to handle frequently arising cases in an efficient manner. 428 // (The fast-path is just a degenerate variant of the slow-path). 429 // Perform the dreaded state transition and pass control into the slow-path. 430 JRT_BLOCK; 431 Handle h_obj(THREAD, obj); 432 ObjectSynchronizer::notify(h_obj, CHECK); 433 JRT_BLOCK_END; 434 JRT_END 435 436 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread *thread)) 437 438 if (!SafepointSynchronize::is_synchronizing() ) { 439 if (ObjectSynchronizer::quick_notify(obj, thread, true)) { 440 return; 441 } 442 } 443 444 // This is the case the fast-path above isn't provisioned to handle. 445 // The fast-path is designed to handle frequently arising cases in an efficient manner. 446 // (The fast-path is just a degenerate variant of the slow-path). 447 // Perform the dreaded state transition and pass control into the slow-path. 448 JRT_BLOCK; 449 Handle h_obj(THREAD, obj); 450 ObjectSynchronizer::notifyall(h_obj, CHECK); 451 JRT_BLOCK_END; 452 JRT_END 453 454 const TypeFunc *OptoRuntime::new_instance_Type() { 455 // create input type (domain) 456 const Type **fields = TypeTuple::fields(1); 457 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated 458 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 459 460 // create result type (range) 461 fields = TypeTuple::fields(1); 462 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop 463 464 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 465 466 return TypeFunc::make(domain, range); 467 } 468 469 470 const TypeFunc *OptoRuntime::athrow_Type() { 471 // create input type (domain) 472 const Type **fields = TypeTuple::fields(1); 473 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated 474 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 475 476 // create result type (range) 477 fields = TypeTuple::fields(0); 478 479 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 480 481 return TypeFunc::make(domain, range); 482 } 483 484 485 const TypeFunc *OptoRuntime::new_array_Type() { 486 // create input type (domain) 487 const Type **fields = TypeTuple::fields(2); 488 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass 489 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size 490 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 491 492 // create result type (range) 493 fields = TypeTuple::fields(1); 494 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop 495 496 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 497 498 return TypeFunc::make(domain, range); 499 } 500 501 const TypeFunc *OptoRuntime::multianewarray_Type(int ndim) { 502 // create input type (domain) 503 const int nargs = ndim + 1; 504 const Type **fields = TypeTuple::fields(nargs); 505 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass 506 for( int i = 1; i < nargs; i++ ) 507 fields[TypeFunc::Parms + i] = TypeInt::INT; // array size 508 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields); 509 510 // create result type (range) 511 fields = TypeTuple::fields(1); 512 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop 513 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 514 515 return TypeFunc::make(domain, range); 516 } 517 518 const TypeFunc *OptoRuntime::multianewarray2_Type() { 519 return multianewarray_Type(2); 520 } 521 522 const TypeFunc *OptoRuntime::multianewarray3_Type() { 523 return multianewarray_Type(3); 524 } 525 526 const TypeFunc *OptoRuntime::multianewarray4_Type() { 527 return multianewarray_Type(4); 528 } 529 530 const TypeFunc *OptoRuntime::multianewarray5_Type() { 531 return multianewarray_Type(5); 532 } 533 534 const TypeFunc *OptoRuntime::multianewarrayN_Type() { 535 // create input type (domain) 536 const Type **fields = TypeTuple::fields(2); 537 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass 538 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // array of dim sizes 539 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 540 541 // create result type (range) 542 fields = TypeTuple::fields(1); 543 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop 544 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 545 546 return TypeFunc::make(domain, range); 547 } 548 549 const TypeFunc *OptoRuntime::g1_wb_pre_Type() { 550 const Type **fields = TypeTuple::fields(2); 551 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value 552 fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread 553 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 554 555 // create result type (range) 556 fields = TypeTuple::fields(0); 557 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 558 559 return TypeFunc::make(domain, range); 560 } 561 562 const TypeFunc *OptoRuntime::g1_wb_post_Type() { 563 564 const Type **fields = TypeTuple::fields(2); 565 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Card addr 566 fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread 567 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 568 569 // create result type (range) 570 fields = TypeTuple::fields(0); 571 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 572 573 return TypeFunc::make(domain, range); 574 } 575 576 const TypeFunc *OptoRuntime::uncommon_trap_Type() { 577 // create input type (domain) 578 const Type **fields = TypeTuple::fields(1); 579 fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action) 580 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 581 582 // create result type (range) 583 fields = TypeTuple::fields(0); 584 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 585 586 return TypeFunc::make(domain, range); 587 } 588 589 //----------------------------------------------------------------------------- 590 // Monitor Handling 591 const TypeFunc *OptoRuntime::complete_monitor_enter_Type() { 592 // create input type (domain) 593 const Type **fields = TypeTuple::fields(2); 594 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked 595 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock 596 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields); 597 598 // create result type (range) 599 fields = TypeTuple::fields(0); 600 601 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 602 603 return TypeFunc::make(domain,range); 604 } 605 606 607 //----------------------------------------------------------------------------- 608 const TypeFunc *OptoRuntime::complete_monitor_exit_Type() { 609 // create input type (domain) 610 const Type **fields = TypeTuple::fields(3); 611 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked 612 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock 613 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self) 614 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields); 615 616 // create result type (range) 617 fields = TypeTuple::fields(0); 618 619 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 620 621 return TypeFunc::make(domain, range); 622 } 623 624 const TypeFunc *OptoRuntime::monitor_notify_Type() { 625 // create input type (domain) 626 const Type **fields = TypeTuple::fields(1); 627 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked 628 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 629 630 // create result type (range) 631 fields = TypeTuple::fields(0); 632 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 633 return TypeFunc::make(domain, range); 634 } 635 636 const TypeFunc* OptoRuntime::flush_windows_Type() { 637 // create input type (domain) 638 const Type** fields = TypeTuple::fields(1); 639 fields[TypeFunc::Parms+0] = NULL; // void 640 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields); 641 642 // create result type 643 fields = TypeTuple::fields(1); 644 fields[TypeFunc::Parms+0] = NULL; // void 645 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 646 647 return TypeFunc::make(domain, range); 648 } 649 650 const TypeFunc* OptoRuntime::l2f_Type() { 651 // create input type (domain) 652 const Type **fields = TypeTuple::fields(2); 653 fields[TypeFunc::Parms+0] = TypeLong::LONG; 654 fields[TypeFunc::Parms+1] = Type::HALF; 655 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 656 657 // create result type (range) 658 fields = TypeTuple::fields(1); 659 fields[TypeFunc::Parms+0] = Type::FLOAT; 660 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 661 662 return TypeFunc::make(domain, range); 663 } 664 665 const TypeFunc* OptoRuntime::modf_Type() { 666 const Type **fields = TypeTuple::fields(2); 667 fields[TypeFunc::Parms+0] = Type::FLOAT; 668 fields[TypeFunc::Parms+1] = Type::FLOAT; 669 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 670 671 // create result type (range) 672 fields = TypeTuple::fields(1); 673 fields[TypeFunc::Parms+0] = Type::FLOAT; 674 675 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 676 677 return TypeFunc::make(domain, range); 678 } 679 680 const TypeFunc *OptoRuntime::Math_D_D_Type() { 681 // create input type (domain) 682 const Type **fields = TypeTuple::fields(2); 683 // Symbol* name of class to be loaded 684 fields[TypeFunc::Parms+0] = Type::DOUBLE; 685 fields[TypeFunc::Parms+1] = Type::HALF; 686 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 687 688 // create result type (range) 689 fields = TypeTuple::fields(2); 690 fields[TypeFunc::Parms+0] = Type::DOUBLE; 691 fields[TypeFunc::Parms+1] = Type::HALF; 692 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields); 693 694 return TypeFunc::make(domain, range); 695 } 696 697 const TypeFunc* OptoRuntime::Math_DD_D_Type() { 698 const Type **fields = TypeTuple::fields(4); 699 fields[TypeFunc::Parms+0] = Type::DOUBLE; 700 fields[TypeFunc::Parms+1] = Type::HALF; 701 fields[TypeFunc::Parms+2] = Type::DOUBLE; 702 fields[TypeFunc::Parms+3] = Type::HALF; 703 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields); 704 705 // create result type (range) 706 fields = TypeTuple::fields(2); 707 fields[TypeFunc::Parms+0] = Type::DOUBLE; 708 fields[TypeFunc::Parms+1] = Type::HALF; 709 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields); 710 711 return TypeFunc::make(domain, range); 712 } 713 714 //-------------- currentTimeMillis, currentTimeNanos, etc 715 716 const TypeFunc* OptoRuntime::void_long_Type() { 717 // create input type (domain) 718 const Type **fields = TypeTuple::fields(0); 719 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields); 720 721 // create result type (range) 722 fields = TypeTuple::fields(2); 723 fields[TypeFunc::Parms+0] = TypeLong::LONG; 724 fields[TypeFunc::Parms+1] = Type::HALF; 725 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields); 726 727 return TypeFunc::make(domain, range); 728 } 729 730 // arraycopy stub variations: 731 enum ArrayCopyType { 732 ac_fast, // void(ptr, ptr, size_t) 733 ac_checkcast, // int(ptr, ptr, size_t, size_t, ptr) 734 ac_slow, // void(ptr, int, ptr, int, int) 735 ac_generic // int(ptr, int, ptr, int, int) 736 }; 737 738 static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) { 739 // create input type (domain) 740 int num_args = (act == ac_fast ? 3 : 5); 741 int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0); 742 int argcnt = num_args; 743 LP64_ONLY(argcnt += num_size_args); // halfwords for lengths 744 const Type** fields = TypeTuple::fields(argcnt); 745 int argp = TypeFunc::Parms; 746 fields[argp++] = TypePtr::NOTNULL; // src 747 if (num_size_args == 0) { 748 fields[argp++] = TypeInt::INT; // src_pos 749 } 750 fields[argp++] = TypePtr::NOTNULL; // dest 751 if (num_size_args == 0) { 752 fields[argp++] = TypeInt::INT; // dest_pos 753 fields[argp++] = TypeInt::INT; // length 754 } 755 while (num_size_args-- > 0) { 756 fields[argp++] = TypeX_X; // size in whatevers (size_t) 757 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length 758 } 759 if (act == ac_checkcast) { 760 fields[argp++] = TypePtr::NOTNULL; // super_klass 761 } 762 assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act"); 763 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 764 765 // create result type if needed 766 int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0); 767 fields = TypeTuple::fields(1); 768 if (retcnt == 0) 769 fields[TypeFunc::Parms+0] = NULL; // void 770 else 771 fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed 772 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields); 773 return TypeFunc::make(domain, range); 774 } 775 776 const TypeFunc* OptoRuntime::fast_arraycopy_Type() { 777 // This signature is simple: Two base pointers and a size_t. 778 return make_arraycopy_Type(ac_fast); 779 } 780 781 const TypeFunc* OptoRuntime::checkcast_arraycopy_Type() { 782 // An extension of fast_arraycopy_Type which adds type checking. 783 return make_arraycopy_Type(ac_checkcast); 784 } 785 786 const TypeFunc* OptoRuntime::slow_arraycopy_Type() { 787 // This signature is exactly the same as System.arraycopy. 788 // There are no intptr_t (int/long) arguments. 789 return make_arraycopy_Type(ac_slow); 790 } 791 792 const TypeFunc* OptoRuntime::generic_arraycopy_Type() { 793 // This signature is like System.arraycopy, except that it returns status. 794 return make_arraycopy_Type(ac_generic); 795 } 796 797 798 const TypeFunc* OptoRuntime::array_fill_Type() { 799 const Type** fields; 800 int argp = TypeFunc::Parms; 801 // create input type (domain): pointer, int, size_t 802 fields = TypeTuple::fields(3 LP64_ONLY( + 1)); 803 fields[argp++] = TypePtr::NOTNULL; 804 fields[argp++] = TypeInt::INT; 805 fields[argp++] = TypeX_X; // size in whatevers (size_t) 806 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length 807 const TypeTuple *domain = TypeTuple::make(argp, fields); 808 809 // create result type 810 fields = TypeTuple::fields(1); 811 fields[TypeFunc::Parms+0] = NULL; // void 812 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 813 814 return TypeFunc::make(domain, range); 815 } 816 817 // for aescrypt encrypt/decrypt operations, just three pointers returning void (length is constant) 818 const TypeFunc* OptoRuntime::aescrypt_block_Type() { 819 // create input type (domain) 820 int num_args = 3; 821 if (Matcher::pass_original_key_for_aes()) { 822 num_args = 4; 823 } 824 int argcnt = num_args; 825 const Type** fields = TypeTuple::fields(argcnt); 826 int argp = TypeFunc::Parms; 827 fields[argp++] = TypePtr::NOTNULL; // src 828 fields[argp++] = TypePtr::NOTNULL; // dest 829 fields[argp++] = TypePtr::NOTNULL; // k array 830 if (Matcher::pass_original_key_for_aes()) { 831 fields[argp++] = TypePtr::NOTNULL; // original k array 832 } 833 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 834 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 835 836 // no result type needed 837 fields = TypeTuple::fields(1); 838 fields[TypeFunc::Parms+0] = NULL; // void 839 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 840 return TypeFunc::make(domain, range); 841 } 842 843 /** 844 * int updateBytesCRC32(int crc, byte* b, int len) 845 */ 846 const TypeFunc* OptoRuntime::updateBytesCRC32_Type() { 847 // create input type (domain) 848 int num_args = 3; 849 int argcnt = num_args; 850 const Type** fields = TypeTuple::fields(argcnt); 851 int argp = TypeFunc::Parms; 852 fields[argp++] = TypeInt::INT; // crc 853 fields[argp++] = TypePtr::NOTNULL; // src 854 fields[argp++] = TypeInt::INT; // len 855 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 856 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 857 858 // result type needed 859 fields = TypeTuple::fields(1); 860 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result 861 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 862 return TypeFunc::make(domain, range); 863 } 864 865 /** 866 * int updateBytesCRC32C(int crc, byte* buf, int len, int* table) 867 */ 868 const TypeFunc* OptoRuntime::updateBytesCRC32C_Type() { 869 // create input type (domain) 870 int num_args = 4; 871 int argcnt = num_args; 872 const Type** fields = TypeTuple::fields(argcnt); 873 int argp = TypeFunc::Parms; 874 fields[argp++] = TypeInt::INT; // crc 875 fields[argp++] = TypePtr::NOTNULL; // buf 876 fields[argp++] = TypeInt::INT; // len 877 fields[argp++] = TypePtr::NOTNULL; // table 878 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 879 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 880 881 // result type needed 882 fields = TypeTuple::fields(1); 883 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result 884 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 885 return TypeFunc::make(domain, range); 886 } 887 888 /** 889 * int updateBytesAdler32(int adler, bytes* b, int off, int len) 890 */ 891 const TypeFunc* OptoRuntime::updateBytesAdler32_Type() { 892 // create input type (domain) 893 int num_args = 3; 894 int argcnt = num_args; 895 const Type** fields = TypeTuple::fields(argcnt); 896 int argp = TypeFunc::Parms; 897 fields[argp++] = TypeInt::INT; // crc 898 fields[argp++] = TypePtr::NOTNULL; // src + offset 899 fields[argp++] = TypeInt::INT; // len 900 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 901 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 902 903 // result type needed 904 fields = TypeTuple::fields(1); 905 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result 906 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 907 return TypeFunc::make(domain, range); 908 } 909 910 // for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning int 911 const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() { 912 // create input type (domain) 913 int num_args = 5; 914 if (Matcher::pass_original_key_for_aes()) { 915 num_args = 6; 916 } 917 int argcnt = num_args; 918 const Type** fields = TypeTuple::fields(argcnt); 919 int argp = TypeFunc::Parms; 920 fields[argp++] = TypePtr::NOTNULL; // src 921 fields[argp++] = TypePtr::NOTNULL; // dest 922 fields[argp++] = TypePtr::NOTNULL; // k array 923 fields[argp++] = TypePtr::NOTNULL; // r array 924 fields[argp++] = TypeInt::INT; // src len 925 if (Matcher::pass_original_key_for_aes()) { 926 fields[argp++] = TypePtr::NOTNULL; // original k array 927 } 928 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 929 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 930 931 // returning cipher len (int) 932 fields = TypeTuple::fields(1); 933 fields[TypeFunc::Parms+0] = TypeInt::INT; 934 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 935 return TypeFunc::make(domain, range); 936 } 937 938 //for counterMode calls of aescrypt encrypt/decrypt, four pointers and a length, returning int 939 const TypeFunc* OptoRuntime::counterMode_aescrypt_Type() { 940 // create input type (domain) 941 int num_args = 7; 942 if (Matcher::pass_original_key_for_aes()) { 943 num_args = 8; 944 } 945 int argcnt = num_args; 946 const Type** fields = TypeTuple::fields(argcnt); 947 int argp = TypeFunc::Parms; 948 fields[argp++] = TypePtr::NOTNULL; // src 949 fields[argp++] = TypePtr::NOTNULL; // dest 950 fields[argp++] = TypePtr::NOTNULL; // k array 951 fields[argp++] = TypePtr::NOTNULL; // counter array 952 fields[argp++] = TypeInt::INT; // src len 953 fields[argp++] = TypePtr::NOTNULL; // saved_encCounter 954 fields[argp++] = TypePtr::NOTNULL; // saved used addr 955 if (Matcher::pass_original_key_for_aes()) { 956 fields[argp++] = TypePtr::NOTNULL; // original k array 957 } 958 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 959 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 960 // returning cipher len (int) 961 fields = TypeTuple::fields(1); 962 fields[TypeFunc::Parms + 0] = TypeInt::INT; 963 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 964 return TypeFunc::make(domain, range); 965 } 966 967 /* 968 * void implCompress(byte[] buf, int ofs) 969 */ 970 const TypeFunc* OptoRuntime::sha_implCompress_Type() { 971 // create input type (domain) 972 int num_args = 2; 973 int argcnt = num_args; 974 const Type** fields = TypeTuple::fields(argcnt); 975 int argp = TypeFunc::Parms; 976 fields[argp++] = TypePtr::NOTNULL; // buf 977 fields[argp++] = TypePtr::NOTNULL; // state 978 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 979 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 980 981 // no result type needed 982 fields = TypeTuple::fields(1); 983 fields[TypeFunc::Parms+0] = NULL; // void 984 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 985 return TypeFunc::make(domain, range); 986 } 987 988 /* 989 * int implCompressMultiBlock(byte[] b, int ofs, int limit) 990 */ 991 const TypeFunc* OptoRuntime::digestBase_implCompressMB_Type() { 992 // create input type (domain) 993 int num_args = 4; 994 int argcnt = num_args; 995 const Type** fields = TypeTuple::fields(argcnt); 996 int argp = TypeFunc::Parms; 997 fields[argp++] = TypePtr::NOTNULL; // buf 998 fields[argp++] = TypePtr::NOTNULL; // state 999 fields[argp++] = TypeInt::INT; // ofs 1000 fields[argp++] = TypeInt::INT; // limit 1001 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1002 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1003 1004 // returning ofs (int) 1005 fields = TypeTuple::fields(1); 1006 fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs 1007 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1008 return TypeFunc::make(domain, range); 1009 } 1010 1011 const TypeFunc* OptoRuntime::multiplyToLen_Type() { 1012 // create input type (domain) 1013 int num_args = 6; 1014 int argcnt = num_args; 1015 const Type** fields = TypeTuple::fields(argcnt); 1016 int argp = TypeFunc::Parms; 1017 fields[argp++] = TypePtr::NOTNULL; // x 1018 fields[argp++] = TypeInt::INT; // xlen 1019 fields[argp++] = TypePtr::NOTNULL; // y 1020 fields[argp++] = TypeInt::INT; // ylen 1021 fields[argp++] = TypePtr::NOTNULL; // z 1022 fields[argp++] = TypeInt::INT; // zlen 1023 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1024 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1025 1026 // no result type needed 1027 fields = TypeTuple::fields(1); 1028 fields[TypeFunc::Parms+0] = NULL; 1029 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1030 return TypeFunc::make(domain, range); 1031 } 1032 1033 const TypeFunc* OptoRuntime::squareToLen_Type() { 1034 // create input type (domain) 1035 int num_args = 4; 1036 int argcnt = num_args; 1037 const Type** fields = TypeTuple::fields(argcnt); 1038 int argp = TypeFunc::Parms; 1039 fields[argp++] = TypePtr::NOTNULL; // x 1040 fields[argp++] = TypeInt::INT; // len 1041 fields[argp++] = TypePtr::NOTNULL; // z 1042 fields[argp++] = TypeInt::INT; // zlen 1043 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1044 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1045 1046 // no result type needed 1047 fields = TypeTuple::fields(1); 1048 fields[TypeFunc::Parms+0] = NULL; 1049 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1050 return TypeFunc::make(domain, range); 1051 } 1052 1053 // for mulAdd calls, 2 pointers and 3 ints, returning int 1054 const TypeFunc* OptoRuntime::mulAdd_Type() { 1055 // create input type (domain) 1056 int num_args = 5; 1057 int argcnt = num_args; 1058 const Type** fields = TypeTuple::fields(argcnt); 1059 int argp = TypeFunc::Parms; 1060 fields[argp++] = TypePtr::NOTNULL; // out 1061 fields[argp++] = TypePtr::NOTNULL; // in 1062 fields[argp++] = TypeInt::INT; // offset 1063 fields[argp++] = TypeInt::INT; // len 1064 fields[argp++] = TypeInt::INT; // k 1065 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1066 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1067 1068 // returning carry (int) 1069 fields = TypeTuple::fields(1); 1070 fields[TypeFunc::Parms+0] = TypeInt::INT; 1071 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1072 return TypeFunc::make(domain, range); 1073 } 1074 1075 const TypeFunc* OptoRuntime::montgomeryMultiply_Type() { 1076 // create input type (domain) 1077 int num_args = 7; 1078 int argcnt = num_args; 1079 const Type** fields = TypeTuple::fields(argcnt); 1080 int argp = TypeFunc::Parms; 1081 fields[argp++] = TypePtr::NOTNULL; // a 1082 fields[argp++] = TypePtr::NOTNULL; // b 1083 fields[argp++] = TypePtr::NOTNULL; // n 1084 fields[argp++] = TypeInt::INT; // len 1085 fields[argp++] = TypeLong::LONG; // inv 1086 fields[argp++] = Type::HALF; 1087 fields[argp++] = TypePtr::NOTNULL; // result 1088 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1089 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1090 1091 // result type needed 1092 fields = TypeTuple::fields(1); 1093 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL; 1094 1095 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1096 return TypeFunc::make(domain, range); 1097 } 1098 1099 const TypeFunc* OptoRuntime::montgomerySquare_Type() { 1100 // create input type (domain) 1101 int num_args = 6; 1102 int argcnt = num_args; 1103 const Type** fields = TypeTuple::fields(argcnt); 1104 int argp = TypeFunc::Parms; 1105 fields[argp++] = TypePtr::NOTNULL; // a 1106 fields[argp++] = TypePtr::NOTNULL; // n 1107 fields[argp++] = TypeInt::INT; // len 1108 fields[argp++] = TypeLong::LONG; // inv 1109 fields[argp++] = Type::HALF; 1110 fields[argp++] = TypePtr::NOTNULL; // result 1111 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1112 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1113 1114 // result type needed 1115 fields = TypeTuple::fields(1); 1116 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL; 1117 1118 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1119 return TypeFunc::make(domain, range); 1120 } 1121 1122 const TypeFunc* OptoRuntime::vectorizedMismatch_Type() { 1123 // create input type (domain) 1124 int num_args = 4; 1125 int argcnt = num_args; 1126 const Type** fields = TypeTuple::fields(argcnt); 1127 int argp = TypeFunc::Parms; 1128 fields[argp++] = TypePtr::NOTNULL; // obja 1129 fields[argp++] = TypePtr::NOTNULL; // objb 1130 fields[argp++] = TypeInt::INT; // length, number of elements 1131 fields[argp++] = TypeInt::INT; // log2scale, element size 1132 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1133 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1134 1135 //return mismatch index (int) 1136 fields = TypeTuple::fields(1); 1137 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1138 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1139 return TypeFunc::make(domain, range); 1140 } 1141 1142 // GHASH block processing 1143 const TypeFunc* OptoRuntime::ghash_processBlocks_Type() { 1144 int argcnt = 4; 1145 1146 const Type** fields = TypeTuple::fields(argcnt); 1147 int argp = TypeFunc::Parms; 1148 fields[argp++] = TypePtr::NOTNULL; // state 1149 fields[argp++] = TypePtr::NOTNULL; // subkeyH 1150 fields[argp++] = TypePtr::NOTNULL; // data 1151 fields[argp++] = TypeInt::INT; // blocks 1152 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1153 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1154 1155 // result type needed 1156 fields = TypeTuple::fields(1); 1157 fields[TypeFunc::Parms+0] = NULL; // void 1158 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1159 return TypeFunc::make(domain, range); 1160 } 1161 1162 //------------- Interpreter state access for on stack replacement 1163 const TypeFunc* OptoRuntime::osr_end_Type() { 1164 // create input type (domain) 1165 const Type **fields = TypeTuple::fields(1); 1166 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf 1167 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 1168 1169 // create result type 1170 fields = TypeTuple::fields(1); 1171 // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop 1172 fields[TypeFunc::Parms+0] = NULL; // void 1173 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 1174 return TypeFunc::make(domain, range); 1175 } 1176 1177 //-------------- methodData update helpers 1178 1179 const TypeFunc* OptoRuntime::profile_receiver_type_Type() { 1180 // create input type (domain) 1181 const Type **fields = TypeTuple::fields(2); 1182 fields[TypeFunc::Parms+0] = TypeAryPtr::NOTNULL; // methodData pointer 1183 fields[TypeFunc::Parms+1] = TypeInstPtr::BOTTOM; // receiver oop 1184 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 1185 1186 // create result type 1187 fields = TypeTuple::fields(1); 1188 fields[TypeFunc::Parms+0] = NULL; // void 1189 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 1190 return TypeFunc::make(domain,range); 1191 } 1192 1193 JRT_LEAF(void, OptoRuntime::profile_receiver_type_C(DataLayout* data, oopDesc* receiver)) 1194 if (receiver == NULL) return; 1195 Klass* receiver_klass = receiver->klass(); 1196 1197 intptr_t* mdp = ((intptr_t*)(data)) + DataLayout::header_size_in_cells(); 1198 int empty_row = -1; // free row, if any is encountered 1199 1200 // ReceiverTypeData* vc = new ReceiverTypeData(mdp); 1201 for (uint row = 0; row < ReceiverTypeData::row_limit(); row++) { 1202 // if (vc->receiver(row) == receiver_klass) 1203 int receiver_off = ReceiverTypeData::receiver_cell_index(row); 1204 intptr_t row_recv = *(mdp + receiver_off); 1205 if (row_recv == (intptr_t) receiver_klass) { 1206 // vc->set_receiver_count(row, vc->receiver_count(row) + DataLayout::counter_increment); 1207 int count_off = ReceiverTypeData::receiver_count_cell_index(row); 1208 *(mdp + count_off) += DataLayout::counter_increment; 1209 return; 1210 } else if (row_recv == 0) { 1211 // else if (vc->receiver(row) == NULL) 1212 empty_row = (int) row; 1213 } 1214 } 1215 1216 if (empty_row != -1) { 1217 int receiver_off = ReceiverTypeData::receiver_cell_index(empty_row); 1218 // vc->set_receiver(empty_row, receiver_klass); 1219 *(mdp + receiver_off) = (intptr_t) receiver_klass; 1220 // vc->set_receiver_count(empty_row, DataLayout::counter_increment); 1221 int count_off = ReceiverTypeData::receiver_count_cell_index(empty_row); 1222 *(mdp + count_off) = DataLayout::counter_increment; 1223 } else { 1224 // Receiver did not match any saved receiver and there is no empty row for it. 1225 // Increment total counter to indicate polymorphic case. 1226 intptr_t* count_p = (intptr_t*)(((uint8_t*)(data)) + in_bytes(CounterData::count_offset())); 1227 *count_p += DataLayout::counter_increment; 1228 } 1229 JRT_END 1230 1231 //------------------------------------------------------------------------------------- 1232 // register policy 1233 1234 bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) { 1235 assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register"); 1236 switch (register_save_policy[reg]) { 1237 case 'C': return false; //SOC 1238 case 'E': return true ; //SOE 1239 case 'N': return false; //NS 1240 case 'A': return false; //AS 1241 } 1242 ShouldNotReachHere(); 1243 return false; 1244 } 1245 1246 //----------------------------------------------------------------------- 1247 // Exceptions 1248 // 1249 1250 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg); 1251 1252 // The method is an entry that is always called by a C++ method not 1253 // directly from compiled code. Compiled code will call the C++ method following. 1254 // We can't allow async exception to be installed during exception processing. 1255 JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* thread, nmethod* &nm)) 1256 1257 // Do not confuse exception_oop with pending_exception. The exception_oop 1258 // is only used to pass arguments into the method. Not for general 1259 // exception handling. DO NOT CHANGE IT to use pending_exception, since 1260 // the runtime stubs checks this on exit. 1261 assert(thread->exception_oop() != NULL, "exception oop is found"); 1262 address handler_address = NULL; 1263 1264 Handle exception(thread, thread->exception_oop()); 1265 address pc = thread->exception_pc(); 1266 1267 // Clear out the exception oop and pc since looking up an 1268 // exception handler can cause class loading, which might throw an 1269 // exception and those fields are expected to be clear during 1270 // normal bytecode execution. 1271 thread->clear_exception_oop_and_pc(); 1272 1273 LogTarget(Info, exceptions) lt; 1274 if (lt.is_enabled()) { 1275 ResourceMark rm; 1276 LogStream ls(lt); 1277 trace_exception(&ls, exception(), pc, ""); 1278 } 1279 1280 // for AbortVMOnException flag 1281 Exceptions::debug_check_abort(exception); 1282 1283 #ifdef ASSERT 1284 if (!(exception->is_a(SystemDictionary::Throwable_klass()))) { 1285 // should throw an exception here 1286 ShouldNotReachHere(); 1287 } 1288 #endif 1289 1290 // new exception handling: this method is entered only from adapters 1291 // exceptions from compiled java methods are handled in compiled code 1292 // using rethrow node 1293 1294 nm = CodeCache::find_nmethod(pc); 1295 assert(nm != NULL, "No NMethod found"); 1296 if (nm->is_native_method()) { 1297 fatal("Native method should not have path to exception handling"); 1298 } else { 1299 // we are switching to old paradigm: search for exception handler in caller_frame 1300 // instead in exception handler of caller_frame.sender() 1301 1302 if (JvmtiExport::can_post_on_exceptions()) { 1303 // "Full-speed catching" is not necessary here, 1304 // since we're notifying the VM on every catch. 1305 // Force deoptimization and the rest of the lookup 1306 // will be fine. 1307 deoptimize_caller_frame(thread); 1308 } 1309 1310 // Check the stack guard pages. If enabled, look for handler in this frame; 1311 // otherwise, forcibly unwind the frame. 1312 // 1313 // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate. 1314 bool force_unwind = !thread->reguard_stack(); 1315 bool deopting = false; 1316 if (nm->is_deopt_pc(pc)) { 1317 deopting = true; 1318 RegisterMap map(thread, false); 1319 frame deoptee = thread->last_frame().sender(&map); 1320 assert(deoptee.is_deoptimized_frame(), "must be deopted"); 1321 // Adjust the pc back to the original throwing pc 1322 pc = deoptee.pc(); 1323 } 1324 1325 // If we are forcing an unwind because of stack overflow then deopt is 1326 // irrelevant since we are throwing the frame away anyway. 1327 1328 if (deopting && !force_unwind) { 1329 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception(); 1330 } else { 1331 1332 handler_address = 1333 force_unwind ? NULL : nm->handler_for_exception_and_pc(exception, pc); 1334 1335 if (handler_address == NULL) { 1336 bool recursive_exception = false; 1337 handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception); 1338 assert (handler_address != NULL, "must have compiled handler"); 1339 // Update the exception cache only when the unwind was not forced 1340 // and there didn't happen another exception during the computation of the 1341 // compiled exception handler. Checking for exception oop equality is not 1342 // sufficient because some exceptions are pre-allocated and reused. 1343 if (!force_unwind && !recursive_exception) { 1344 nm->add_handler_for_exception_and_pc(exception,pc,handler_address); 1345 } 1346 } else { 1347 #ifdef ASSERT 1348 bool recursive_exception = false; 1349 address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception); 1350 vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT, 1351 p2i(handler_address), p2i(computed_address)); 1352 #endif 1353 } 1354 } 1355 1356 thread->set_exception_pc(pc); 1357 thread->set_exception_handler_pc(handler_address); 1358 1359 // Check if the exception PC is a MethodHandle call site. 1360 thread->set_is_method_handle_return(nm->is_method_handle_return(pc)); 1361 } 1362 1363 // Restore correct return pc. Was saved above. 1364 thread->set_exception_oop(exception()); 1365 return handler_address; 1366 1367 JRT_END 1368 1369 // We are entering here from exception_blob 1370 // If there is a compiled exception handler in this method, we will continue there; 1371 // otherwise we will unwind the stack and continue at the caller of top frame method 1372 // Note we enter without the usual JRT wrapper. We will call a helper routine that 1373 // will do the normal VM entry. We do it this way so that we can see if the nmethod 1374 // we looked up the handler for has been deoptimized in the meantime. If it has been 1375 // we must not use the handler and instead return the deopt blob. 1376 address OptoRuntime::handle_exception_C(JavaThread* thread) { 1377 // 1378 // We are in Java not VM and in debug mode we have a NoHandleMark 1379 // 1380 #ifndef PRODUCT 1381 SharedRuntime::_find_handler_ctr++; // find exception handler 1382 #endif 1383 debug_only(NoHandleMark __hm;) 1384 nmethod* nm = NULL; 1385 address handler_address = NULL; 1386 { 1387 // Enter the VM 1388 1389 ResetNoHandleMark rnhm; 1390 handler_address = handle_exception_C_helper(thread, nm); 1391 } 1392 1393 // Back in java: Use no oops, DON'T safepoint 1394 1395 // Now check to see if the handler we are returning is in a now 1396 // deoptimized frame 1397 1398 if (nm != NULL) { 1399 RegisterMap map(thread, false); 1400 frame caller = thread->last_frame().sender(&map); 1401 #ifdef ASSERT 1402 assert(caller.is_compiled_frame(), "must be"); 1403 #endif // ASSERT 1404 if (caller.is_deoptimized_frame()) { 1405 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception(); 1406 } 1407 } 1408 return handler_address; 1409 } 1410 1411 //------------------------------rethrow---------------------------------------- 1412 // We get here after compiled code has executed a 'RethrowNode'. The callee 1413 // is either throwing or rethrowing an exception. The callee-save registers 1414 // have been restored, synchronized objects have been unlocked and the callee 1415 // stack frame has been removed. The return address was passed in. 1416 // Exception oop is passed as the 1st argument. This routine is then called 1417 // from the stub. On exit, we know where to jump in the caller's code. 1418 // After this C code exits, the stub will pop his frame and end in a jump 1419 // (instead of a return). We enter the caller's default handler. 1420 // 1421 // This must be JRT_LEAF: 1422 // - caller will not change its state as we cannot block on exit, 1423 // therefore raw_exception_handler_for_return_address is all it takes 1424 // to handle deoptimized blobs 1425 // 1426 // However, there needs to be a safepoint check in the middle! So compiled 1427 // safepoints are completely watertight. 1428 // 1429 // Thus, it cannot be a leaf since it contains the NoGCVerifier. 1430 // 1431 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE* 1432 // 1433 address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) { 1434 #ifndef PRODUCT 1435 SharedRuntime::_rethrow_ctr++; // count rethrows 1436 #endif 1437 assert (exception != NULL, "should have thrown a NULLPointerException"); 1438 #ifdef ASSERT 1439 if (!(exception->is_a(SystemDictionary::Throwable_klass()))) { 1440 // should throw an exception here 1441 ShouldNotReachHere(); 1442 } 1443 #endif 1444 1445 thread->set_vm_result(exception); 1446 // Frame not compiled (handles deoptimization blob) 1447 return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc); 1448 } 1449 1450 1451 const TypeFunc *OptoRuntime::rethrow_Type() { 1452 // create input type (domain) 1453 const Type **fields = TypeTuple::fields(1); 1454 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop 1455 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields); 1456 1457 // create result type (range) 1458 fields = TypeTuple::fields(1); 1459 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop 1460 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 1461 1462 return TypeFunc::make(domain, range); 1463 } 1464 1465 1466 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) { 1467 // Deoptimize the caller before continuing, as the compiled 1468 // exception handler table may not be valid. 1469 if (!StressCompiledExceptionHandlers && doit) { 1470 deoptimize_caller_frame(thread); 1471 } 1472 } 1473 1474 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) { 1475 // Called from within the owner thread, so no need for safepoint 1476 RegisterMap reg_map(thread); 1477 frame stub_frame = thread->last_frame(); 1478 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check"); 1479 frame caller_frame = stub_frame.sender(®_map); 1480 1481 // Deoptimize the caller frame. 1482 Deoptimization::deoptimize_frame(thread, caller_frame.id()); 1483 } 1484 1485 1486 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) { 1487 // Called from within the owner thread, so no need for safepoint 1488 RegisterMap reg_map(thread); 1489 frame stub_frame = thread->last_frame(); 1490 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check"); 1491 frame caller_frame = stub_frame.sender(®_map); 1492 return caller_frame.is_deoptimized_frame(); 1493 } 1494 1495 1496 const TypeFunc *OptoRuntime::register_finalizer_Type() { 1497 // create input type (domain) 1498 const Type **fields = TypeTuple::fields(1); 1499 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver 1500 // // The JavaThread* is passed to each routine as the last argument 1501 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread 1502 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields); 1503 1504 // create result type (range) 1505 fields = TypeTuple::fields(0); 1506 1507 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 1508 1509 return TypeFunc::make(domain,range); 1510 } 1511 1512 1513 //----------------------------------------------------------------------------- 1514 // Dtrace support. entry and exit probes have the same signature 1515 const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() { 1516 // create input type (domain) 1517 const Type **fields = TypeTuple::fields(2); 1518 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage 1519 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering 1520 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields); 1521 1522 // create result type (range) 1523 fields = TypeTuple::fields(0); 1524 1525 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 1526 1527 return TypeFunc::make(domain,range); 1528 } 1529 1530 const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() { 1531 // create input type (domain) 1532 const Type **fields = TypeTuple::fields(2); 1533 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage 1534 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object 1535 1536 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields); 1537 1538 // create result type (range) 1539 fields = TypeTuple::fields(0); 1540 1541 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 1542 1543 return TypeFunc::make(domain,range); 1544 } 1545 1546 1547 JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer(oopDesc* obj, JavaThread* thread)) 1548 assert(oopDesc::is_oop(obj), "must be a valid oop"); 1549 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise"); 1550 InstanceKlass::register_finalizer(instanceOop(obj), CHECK); 1551 JRT_END 1552 1553 //----------------------------------------------------------------------------- 1554 1555 NamedCounter * volatile OptoRuntime::_named_counters = NULL; 1556 1557 // 1558 // dump the collected NamedCounters. 1559 // 1560 void OptoRuntime::print_named_counters() { 1561 int total_lock_count = 0; 1562 int eliminated_lock_count = 0; 1563 1564 NamedCounter* c = _named_counters; 1565 while (c) { 1566 if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) { 1567 int count = c->count(); 1568 if (count > 0) { 1569 bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter; 1570 if (Verbose) { 1571 tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : ""); 1572 } 1573 total_lock_count += count; 1574 if (eliminated) { 1575 eliminated_lock_count += count; 1576 } 1577 } 1578 } else if (c->tag() == NamedCounter::BiasedLockingCounter) { 1579 BiasedLockingCounters* blc = ((BiasedLockingNamedCounter*)c)->counters(); 1580 if (blc->nonzero()) { 1581 tty->print_cr("%s", c->name()); 1582 blc->print_on(tty); 1583 } 1584 #if INCLUDE_RTM_OPT 1585 } else if (c->tag() == NamedCounter::RTMLockingCounter) { 1586 RTMLockingCounters* rlc = ((RTMLockingNamedCounter*)c)->counters(); 1587 if (rlc->nonzero()) { 1588 tty->print_cr("%s", c->name()); 1589 rlc->print_on(tty); 1590 } 1591 #endif 1592 } 1593 c = c->next(); 1594 } 1595 if (total_lock_count > 0) { 1596 tty->print_cr("dynamic locks: %d", total_lock_count); 1597 if (eliminated_lock_count) { 1598 tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count, 1599 (int)(eliminated_lock_count * 100.0 / total_lock_count)); 1600 } 1601 } 1602 } 1603 1604 // 1605 // Allocate a new NamedCounter. The JVMState is used to generate the 1606 // name which consists of method@line for the inlining tree. 1607 // 1608 1609 NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) { 1610 int max_depth = youngest_jvms->depth(); 1611 1612 // Visit scopes from youngest to oldest. 1613 bool first = true; 1614 stringStream st; 1615 for (int depth = max_depth; depth >= 1; depth--) { 1616 JVMState* jvms = youngest_jvms->of_depth(depth); 1617 ciMethod* m = jvms->has_method() ? jvms->method() : NULL; 1618 if (!first) { 1619 st.print(" "); 1620 } else { 1621 first = false; 1622 } 1623 int bci = jvms->bci(); 1624 if (bci < 0) bci = 0; 1625 st.print("%s.%s@%d", m->holder()->name()->as_utf8(), m->name()->as_utf8(), bci); 1626 // To print linenumbers instead of bci use: m->line_number_from_bci(bci) 1627 } 1628 NamedCounter* c; 1629 if (tag == NamedCounter::BiasedLockingCounter) { 1630 c = new BiasedLockingNamedCounter(st.as_string()); 1631 } else if (tag == NamedCounter::RTMLockingCounter) { 1632 c = new RTMLockingNamedCounter(st.as_string()); 1633 } else { 1634 c = new NamedCounter(st.as_string(), tag); 1635 } 1636 1637 // atomically add the new counter to the head of the list. We only 1638 // add counters so this is safe. 1639 NamedCounter* head; 1640 do { 1641 c->set_next(NULL); 1642 head = _named_counters; 1643 c->set_next(head); 1644 } while (Atomic::cmpxchg(c, &_named_counters, head) != head); 1645 return c; 1646 } 1647 1648 int trace_exception_counter = 0; 1649 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) { 1650 trace_exception_counter++; 1651 stringStream tempst; 1652 1653 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg); 1654 exception_oop->print_value_on(&tempst); 1655 tempst.print(" in "); 1656 CodeBlob* blob = CodeCache::find_blob(exception_pc); 1657 if (blob->is_compiled()) { 1658 CompiledMethod* cm = blob->as_compiled_method_or_null(); 1659 cm->method()->print_value_on(&tempst); 1660 } else if (blob->is_runtime_stub()) { 1661 tempst.print("<runtime-stub>"); 1662 } else { 1663 tempst.print("<unknown>"); 1664 } 1665 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc)); 1666 tempst.print("]"); 1667 1668 st->print_raw_cr(tempst.as_string()); 1669 }