1 /* 2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"

# define __ _masm->


//------------------------------------------------------------------------------------------------------------------------
// Implementation of InterpreterCodelet

// Record the codelet's descriptive attributes.  The machine code itself is
// emitted separately into the stub queue by the owning CodeletMark.
void InterpreterCodelet::initialize(const char* description, Bytecodes::Code bytecode) {
  _description = description;
  _bytecode    = bytecode;
}


// Nothing to check for a codelet; present to satisfy the Stub interface.
void InterpreterCodelet::verify() {
}


// Print a one-line summary (description, bytecode, address range, size) and,
// under -XX:+PrintInterpreter, a full disassembly of the codelet.
void InterpreterCodelet::print_on(outputStream* st) const {
  ttyLocker ttyl;

  if (PrintInterpreter) {
    st->cr();
    st->print_cr("----------------------------------------------------------------------");
  }

  // A negative bytecode means the codelet is not tied to a specific bytecode.
  if (description() != NULL) st->print("%s ", description());
  if (bytecode()    >= 0   ) st->print("%d %s ", bytecode(), Bytecodes::name(bytecode()));
  st->print_cr("[" INTPTR_FORMAT ", " INTPTR_FORMAT "] %d bytes",
               p2i(code_begin()), p2i(code_end()), code_size());

  if (PrintInterpreter) {
    st->cr();
    // In debug builds pass the recorded code strings to the disassembler;
    // in product builds there are none, so pass an empty CodeStrings.
    Disassembler::decode(code_begin(), code_end(), st, DEBUG_ONLY(_strings) NOT_DEBUG(CodeStrings()));
  }
}

CodeletMark::CodeletMark(InterpreterMacroAssembler*& masm, 85 const char* description, 86 Bytecodes::Code bytecode) : 87 _clet((InterpreterCodelet*)AbstractInterpreter::code()->request(codelet_size())), 88 _cb(_clet->code_begin(), _clet->code_size()) { 89 // Request all space (add some slack for Codelet data). 90 assert(_clet != NULL, "we checked not enough space already"); 91 92 // Initialize Codelet attributes. 93 _clet->initialize(description, bytecode); 94 // Create assembler for code generation. 95 masm = new InterpreterMacroAssembler(&_cb); 96 _masm = &masm; 97 } 98 99 CodeletMark::~CodeletMark() { 100 // Align so printing shows nop's instead of random code at the end (Codelets are aligned). 101 (*_masm)->align(wordSize); 102 // Make sure all code is in code buffer. 103 (*_masm)->flush(); 104 105 // Commit Codelet. 106 int committed_code_size = (*_masm)->code()->pure_insts_size(); 107 if (committed_code_size) { 108 AbstractInterpreter::code()->commit(committed_code_size, (*_masm)->code()->strings()); 109 } 110 // Make sure nobody can use _masm outside a CodeletMark lifespan. 
111 *_masm = NULL; 112 } 113 114 //------------------------------------------------------------------------------------------------------------------------ 115 // Implementation of platform independent aspects of Interpreter 116 117 void AbstractInterpreter::initialize() { 118 if (_code != NULL) return; 119 120 // make sure 'imported' classes are initialized 121 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) BytecodeCounter::reset(); 122 if (PrintBytecodeHistogram) BytecodeHistogram::reset(); 123 if (PrintBytecodePairHistogram) BytecodePairHistogram::reset(); 124 125 InvocationCounter::reinitialize(DelayCompilationDuringStartup); 126 127 } 128 129 void AbstractInterpreter::print() { 130 tty->cr(); 131 tty->print_cr("----------------------------------------------------------------------"); 132 tty->print_cr("Interpreter"); 133 tty->cr(); 134 tty->print_cr("code size = %6dK bytes", (int)_code->used_space()/1024); 135 tty->print_cr("total space = %6dK bytes", (int)_code->total_space()/1024); 136 tty->print_cr("wasted space = %6dK bytes", (int)_code->available_space()/1024); 137 tty->cr(); 138 tty->print_cr("# of codelets = %6d" , _code->number_of_stubs()); 139 if (_code->number_of_stubs() != 0) { 140 tty->print_cr("avg codelet size = %6d bytes", _code->used_space() / _code->number_of_stubs()); 141 tty->cr(); 142 } 143 _code->print(); 144 tty->print_cr("----------------------------------------------------------------------"); 145 tty->cr(); 146 } 147 148 149 void interpreter_init() { 150 Interpreter::initialize(); 151 #ifndef PRODUCT 152 if (TraceBytecodes) BytecodeTracer::set_closure(BytecodeTracer::std_closure()); 153 #endif // PRODUCT 154 // need to hit every safepoint in order to call zapping routine 155 // register the interpreter 156 Forte::register_stub( 157 "Interpreter", 158 AbstractInterpreter::code()->code_start(), 159 AbstractInterpreter::code()->code_end() 160 ); 161 162 // notify JVMTI profiler 163 if 
(JvmtiExport::should_post_dynamic_code_generated()) { 164 JvmtiExport::post_dynamic_code_generated("Interpreter", 165 AbstractInterpreter::code()->code_start(), 166 AbstractInterpreter::code()->code_end()); 167 } 168 } 169 170 //------------------------------------------------------------------------------------------------------------------------ 171 // Implementation of interpreter 172 173 StubQueue* AbstractInterpreter::_code = NULL; 174 bool AbstractInterpreter::_notice_safepoints = false; 175 address AbstractInterpreter::_rethrow_exception_entry = NULL; 176 177 address AbstractInterpreter::_native_entry_begin = NULL; 178 address AbstractInterpreter::_native_entry_end = NULL; 179 address AbstractInterpreter::_slow_signature_handler; 180 address AbstractInterpreter::_entry_table [AbstractInterpreter::number_of_method_entries]; 181 address AbstractInterpreter::_native_abi_to_tosca [AbstractInterpreter::number_of_result_handlers]; 182 183 //------------------------------------------------------------------------------------------------------------------------ 184 // Generation of complete interpreter 185 186 AbstractInterpreterGenerator::AbstractInterpreterGenerator(StubQueue* _code) { 187 _masm = NULL; 188 } 189 190 191 static const BasicType types[Interpreter::number_of_result_handlers] = { 192 T_BOOLEAN, 193 T_CHAR , 194 T_BYTE , 195 T_SHORT , 196 T_INT , 197 T_LONG , 198 T_VOID , 199 T_FLOAT , 200 T_DOUBLE , 201 T_OBJECT 202 }; 203 204 void AbstractInterpreterGenerator::generate_all() { 205 206 207 { CodeletMark cm(_masm, "slow signature handler"); 208 Interpreter::_slow_signature_handler = generate_slow_signature_handler(); 209 } 210 211 } 212 213 //------------------------------------------------------------------------------------------------------------------------ 214 // Entry points 215 216 AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m) { 217 // Abstract method? 
218 if (m->is_abstract()) return abstract; 219 220 // Method handle primitive? 221 if (m->is_method_handle_intrinsic()) { 222 vmIntrinsics::ID id = m->intrinsic_id(); 223 assert(MethodHandles::is_signature_polymorphic(id), "must match an intrinsic"); 224 MethodKind kind = (MethodKind)( method_handle_invoke_FIRST + 225 ((int)id - vmIntrinsics::FIRST_MH_SIG_POLY) ); 226 assert(kind <= method_handle_invoke_LAST, "parallel enum ranges"); 227 return kind; 228 } 229 230 #ifndef CC_INTERP 231 if (UseCRC32Intrinsics && m->is_native()) { 232 // Use optimized stub code for CRC32 native methods. 233 switch (m->intrinsic_id()) { 234 case vmIntrinsics::_updateCRC32 : return java_util_zip_CRC32_update; 235 case vmIntrinsics::_updateBytesCRC32 : return java_util_zip_CRC32_updateBytes; 236 case vmIntrinsics::_updateByteBufferCRC32 : return java_util_zip_CRC32_updateByteBuffer; 237 } 238 } 239 if (UseCRC32CIntrinsics) { 240 // Use optimized stub code for CRC32C methods. 241 switch (m->intrinsic_id()) { 242 case vmIntrinsics::_updateBytesCRC32C : return java_util_zip_CRC32C_updateBytes; 243 case vmIntrinsics::_updateDirectByteBufferCRC32C : return java_util_zip_CRC32C_updateDirectByteBuffer; 244 } 245 } 246 247 switch(m->intrinsic_id()) { 248 case vmIntrinsics::_intBitsToFloat: return java_lang_Float_intBitsToFloat; 249 case vmIntrinsics::_floatToRawIntBits: return java_lang_Float_floatToRawIntBits; 250 case vmIntrinsics::_longBitsToDouble: return java_lang_Double_longBitsToDouble; 251 case vmIntrinsics::_doubleToRawLongBits: return java_lang_Double_doubleToRawLongBits; 252 } 253 254 #endif // CC_INTERP 255 256 // Native method? 257 // Note: This test must come _before_ the test for intrinsic 258 // methods. See also comments below. 259 if (m->is_native()) { 260 assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out"); 261 return m->is_synchronized() ? native_synchronized : native; 262 } 263 264 // Synchronized? 
265 if (m->is_synchronized()) { 266 return zerolocals_synchronized; 267 } 268 269 if (RegisterFinalizersAtInit && m->code_size() == 1 && 270 m->intrinsic_id() == vmIntrinsics::_Object_init) { 271 // We need to execute the special return bytecode to check for 272 // finalizer registration so create a normal frame. 273 return zerolocals; 274 } 275 276 // Empty method? 277 if (m->is_empty_method()) { 278 return empty; 279 } 280 281 // Special intrinsic method? 282 // Note: This test must come _after_ the test for native methods, 283 // otherwise we will run into problems with JDK 1.2, see also 284 // TemplateInterpreterGenerator::generate_method_entry() for 285 // for details. 286 switch (m->intrinsic_id()) { 287 case vmIntrinsics::_dsin : return java_lang_math_sin ; 288 case vmIntrinsics::_dcos : return java_lang_math_cos ; 289 case vmIntrinsics::_dtan : return java_lang_math_tan ; 290 case vmIntrinsics::_dabs : return java_lang_math_abs ; 291 case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ; 292 case vmIntrinsics::_dlog : return java_lang_math_log ; 293 case vmIntrinsics::_dlog10: return java_lang_math_log10; 294 case vmIntrinsics::_dpow : return java_lang_math_pow ; 295 case vmIntrinsics::_dexp : return java_lang_math_exp ; 296 297 case vmIntrinsics::_Reference_get: 298 return java_lang_ref_reference_get; 299 } 300 301 // Accessor method? 302 if (m->is_getter()) { 303 // TODO: We should have used ::is_accessor above, but fast accessors in Zero expect only getters. 304 // See CppInterpreter::accessor_entry in cppInterpreter_zero.cpp. 
This should be fixed in Zero, 305 // then the call above updated to ::is_accessor 306 assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1"); 307 return accessor; 308 } 309 310 // Note: for now: zero locals for all non-empty methods 311 return zerolocals; 312 } 313 314 315 void AbstractInterpreter::set_entry_for_kind(AbstractInterpreter::MethodKind kind, address entry) { 316 assert(kind >= method_handle_invoke_FIRST && 317 kind <= method_handle_invoke_LAST, "late initialization only for MH entry points"); 318 assert(_entry_table[kind] == _entry_table[abstract], "previous value must be AME entry"); 319 _entry_table[kind] = entry; 320 } 321 322 323 // Return true if the interpreter can prove that the given bytecode has 324 // not yet been executed (in Java semantics, not in actual operation). 325 bool AbstractInterpreter::is_not_reached(const methodHandle& method, int bci) { 326 Bytecodes::Code code = method()->code_at(bci); 327 328 if (!Bytecodes::must_rewrite(code)) { 329 // might have been reached 330 return false; 331 } 332 333 // the bytecode might not be rewritten if the method is an accessor, etc. 334 address ientry = method->interpreter_entry(); 335 if (ientry != entry_for_kind(AbstractInterpreter::zerolocals) && 336 ientry != entry_for_kind(AbstractInterpreter::zerolocals_synchronized)) 337 return false; // interpreter does not run this method! 
338 339 // otherwise, we can be sure this bytecode has never been executed 340 return true; 341 } 342 343 344 #ifndef PRODUCT 345 void AbstractInterpreter::print_method_kind(MethodKind kind) { 346 switch (kind) { 347 case zerolocals : tty->print("zerolocals" ); break; 348 case zerolocals_synchronized: tty->print("zerolocals_synchronized"); break; 349 case native : tty->print("native" ); break; 350 case native_synchronized : tty->print("native_synchronized" ); break; 351 case empty : tty->print("empty" ); break; 352 case accessor : tty->print("accessor" ); break; 353 case abstract : tty->print("abstract" ); break; 354 case java_lang_math_sin : tty->print("java_lang_math_sin" ); break; 355 case java_lang_math_cos : tty->print("java_lang_math_cos" ); break; 356 case java_lang_math_tan : tty->print("java_lang_math_tan" ); break; 357 case java_lang_math_abs : tty->print("java_lang_math_abs" ); break; 358 case java_lang_math_sqrt : tty->print("java_lang_math_sqrt" ); break; 359 case java_lang_math_log : tty->print("java_lang_math_log" ); break; 360 case java_lang_math_log10 : tty->print("java_lang_math_log10" ); break; 361 case java_util_zip_CRC32_update : tty->print("java_util_zip_CRC32_update"); break; 362 case java_util_zip_CRC32_updateBytes : tty->print("java_util_zip_CRC32_updateBytes"); break; 363 case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break; 364 case java_util_zip_CRC32C_updateBytes : tty->print("java_util_zip_CRC32C_updateBytes"); break; 365 case java_util_zip_CRC32C_updateDirectByteBuffer: tty->print("java_util_zip_CRC32C_updateDirectByteByffer"); break; 366 default: 367 if (kind >= method_handle_invoke_FIRST && 368 kind <= method_handle_invoke_LAST) { 369 const char* kind_name = vmIntrinsics::name_at(method_handle_intrinsic(kind)); 370 if (kind_name[0] == '_') kind_name = &kind_name[1]; // '_invokeExact' => 'invokeExact' 371 tty->print("method_handle_%s", kind_name); 372 break; 373 } 374 
ShouldNotReachHere(); 375 break; 376 } 377 } 378 #endif // PRODUCT 379 380 381 //------------------------------------------------------------------------------------------------------------------------ 382 // Deoptimization support 383 384 /** 385 * If a deoptimization happens, this function returns the point of next bytecode to continue execution. 386 */ 387 address AbstractInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) { 388 assert(method->contains(bcp), "just checkin'"); 389 390 // Get the original and rewritten bytecode. 391 Bytecodes::Code code = Bytecodes::java_code_at(method, bcp); 392 assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute"); 393 394 const int bci = method->bci_from(bcp); 395 396 // compute continuation length 397 const int length = Bytecodes::length_at(method, bcp); 398 399 // compute result type 400 BasicType type = T_ILLEGAL; 401 402 switch (code) { 403 case Bytecodes::_invokevirtual : 404 case Bytecodes::_invokespecial : 405 case Bytecodes::_invokestatic : 406 case Bytecodes::_invokeinterface: { 407 Thread *thread = Thread::current(); 408 ResourceMark rm(thread); 409 methodHandle mh(thread, method); 410 type = Bytecode_invoke(mh, bci).result_type(); 411 // since the cache entry might not be initialized: 412 // (NOT needed for the old calling convension) 413 if (!is_top_frame) { 414 int index = Bytes::get_native_u2(bcp+1); 415 method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters); 416 } 417 break; 418 } 419 420 case Bytecodes::_invokedynamic: { 421 Thread *thread = Thread::current(); 422 ResourceMark rm(thread); 423 methodHandle mh(thread, method); 424 type = Bytecode_invoke(mh, bci).result_type(); 425 // since the cache entry might not be initialized: 426 // (NOT needed for the old calling convension) 427 if (!is_top_frame) { 428 int index = Bytes::get_native_u4(bcp+1); 429 
method->constants()->invokedynamic_cp_cache_entry_at(index)->set_parameter_size(callee_parameters); 430 } 431 break; 432 } 433 434 case Bytecodes::_ldc : 435 case Bytecodes::_ldc_w : // fall through 436 case Bytecodes::_ldc2_w: 437 { 438 Thread *thread = Thread::current(); 439 ResourceMark rm(thread); 440 methodHandle mh(thread, method); 441 type = Bytecode_loadconstant(mh, bci).result_type(); 442 break; 443 } 444 445 default: 446 type = Bytecodes::result_type(code); 447 break; 448 } 449 450 // return entry point for computed continuation state & bytecode length 451 return 452 is_top_frame 453 ? Interpreter::deopt_entry (as_TosState(type), length) 454 : Interpreter::return_entry(as_TosState(type), length, code); 455 } 456 457 // If deoptimization happens, this function returns the point where the interpreter reexecutes 458 // the bytecode. 459 // Note: Bytecodes::_athrow is a special case in that it does not return 460 // Interpreter::deopt_entry(vtos, 0) like others 461 address AbstractInterpreter::deopt_reexecute_entry(Method* method, address bcp) { 462 assert(method->contains(bcp), "just checkin'"); 463 Bytecodes::Code code = Bytecodes::java_code_at(method, bcp); 464 #if defined(COMPILER1) || INCLUDE_JVMCI 465 if(code == Bytecodes::_athrow ) { 466 return Interpreter::rethrow_exception_entry(); 467 } 468 #endif /* COMPILER1 || INCLUDE_JVMCI */ 469 return Interpreter::deopt_entry(vtos, 0); 470 } 471 472 // If deoptimization happens, the interpreter should reexecute these bytecodes. 473 // This function mainly helps the compilers to set up the reexecute bit. 
// Returns true if, after deoptimization at the given bytecode, the
// interpreter must reexecute that bytecode rather than continue at the
// following one.  Compilers consult this when setting the reexecute bit
// in debug info.
bool AbstractInterpreter::bytecode_should_reexecute(Bytecodes::Code code) {
  switch (code) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
    case Bytecodes::_fast_binaryswitch:
    case Bytecodes::_fast_linearswitch:
    // recompute conditional expression folded into _if<cond>
    case Bytecodes::_lcmp      :
    case Bytecodes::_fcmpl     :
    case Bytecodes::_fcmpg     :
    case Bytecodes::_dcmpl     :
    case Bytecodes::_dcmpg     :
    case Bytecodes::_ifnull    :
    case Bytecodes::_ifnonnull :
    case Bytecodes::_goto      :
    case Bytecodes::_goto_w    :
    case Bytecodes::_ifeq      :
    case Bytecodes::_ifne      :
    case Bytecodes::_iflt      :
    case Bytecodes::_ifge      :
    case Bytecodes::_ifgt      :
    case Bytecodes::_ifle      :
    case Bytecodes::_if_icmpeq :
    case Bytecodes::_if_icmpne :
    case Bytecodes::_if_icmplt :
    case Bytecodes::_if_icmpge :
    case Bytecodes::_if_icmpgt :
    case Bytecodes::_if_icmple :
    case Bytecodes::_if_acmpeq :
    case Bytecodes::_if_acmpne :
    // special cases
    case Bytecodes::_getfield  :
    case Bytecodes::_putfield  :
    case Bytecodes::_getstatic :
    case Bytecodes::_putstatic :
    case Bytecodes::_aastore   :
#ifdef COMPILER1
    // special case of reexecution
    case Bytecodes::_athrow    :
#endif
      return true;

    default:
      return false;
  }
}

// Emit code that touches ("bangs") each page of the stack shadow zone so a
// stack overflow is detected via a trap before the interpreted frame runs.
void AbstractInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    // Native calls only need to bang the last shadow page; Java calls bang
    // every page starting from the first.
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Pre-fill all method-handle invoke entry slots with the abstract-method
// (AME) entry; the real entries are installed later (see set_entry_for_kind).
void AbstractInterpreterGenerator::initialize_method_handle_entries() {
  // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
  for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) {
    Interpreter::MethodKind kind = (Interpreter::MethodKind) i;
    Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
  }
}