1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"

// Shorthand used by the generator code in this file: emit instructions
// through the current InterpreterMacroAssembler.
# define __ _masm->


//------------------------------------------------------------------------------------------------------------------------
// Implementation of InterpreterCodelet

// Record the codelet's descriptive text and associated bytecode (if any).
// Called right after the codelet's space has been requested from the StubQueue
// (see CodeletMark's constructor).
void InterpreterCodelet::initialize(const char* description, Bytecodes::Code bytecode) {
  _description = description;
  _bytecode = bytecode;
}


// Intentionally empty: codelets carry no invariants to check, but the
// Stub interface requires a verify() entry point.
void InterpreterCodelet::verify() {
}


// Print a one-line summary of this codelet (description, bytecode,
// [begin, end] address range and size). With -XX:+PrintInterpreter, also
// print a separator line and the disassembled code.
void InterpreterCodelet::print_on(outputStream* st) const {
  ttyLocker ttyl;  // keep the multi-line output from interleaving with other threads

  if (PrintInterpreter) {
    st->cr();
    st->print_cr("----------------------------------------------------------------------");
  }

  if (description() != NULL) st->print("%s ", description());
  if (bytecode() >= 0 ) st->print("%d %s ", bytecode(), Bytecodes::name(bytecode()));
  st->print_cr("[" INTPTR_FORMAT ", " INTPTR_FORMAT "] %d bytes",
               p2i(code_begin()), p2i(code_end()), code_size());

  if (PrintInterpreter) {
    st->cr();
    // Debug builds keep per-codelet code strings; product builds pass an
    // empty CodeStrings instead.
    Disassembler::decode(code_begin(), code_end(), st, DEBUG_ONLY(_strings) NOT_DEBUG(CodeStrings()));
  }
}

// Scoped helper for generating one interpreter codelet: the constructor
// requests space from the interpreter's StubQueue and hands the caller a
// fresh assembler positioned in it; the destructor flushes and commits the
// generated code and invalidates the assembler pointer.
CodeletMark::CodeletMark(InterpreterMacroAssembler*& masm,
                         const char* description,
                         Bytecodes::Code bytecode) :
  _clet((InterpreterCodelet*)AbstractInterpreter::code()->request(codelet_size())),
  _cb(_clet->code_begin(), _clet->code_size()) {
  // Request all space (add some slack for Codelet data).
  assert(_clet != NULL, "we checked not enough space already");

  // Initialize Codelet attributes.
  _clet->initialize(description, bytecode);
  // Create assembler for code generation.
  masm = new InterpreterMacroAssembler(&_cb);
  _masm = &masm;
}

CodeletMark::~CodeletMark() {
  // Align so printing shows nop's instead of random code at the end (Codelets are aligned).
  (*_masm)->align(wordSize);
  // Make sure all code is in code buffer.
  (*_masm)->flush();

  // Commit Codelet: give back unused space so the StubQueue can reuse it.
  AbstractInterpreter::code()->commit((*_masm)->code()->pure_insts_size(), (*_masm)->code()->strings());
  // Make sure nobody can use _masm outside a CodeletMark lifespan.
  *_masm = NULL;
}

//------------------------------------------------------------------------------------------------------------------------
// Implementation of platform independent aspects of Interpreter

// One-time setup of the bytecode tracing/counting subsystems and invocation
// counters. Idempotent: returns immediately once the interpreter's code
// queue has been created.
void AbstractInterpreter::initialize() {
  if (_code != NULL) return;

  // make sure 'imported' classes are initialized
  if (CountBytecodes || TraceBytecodes || StopInterpreterAt) BytecodeCounter::reset();
  if (PrintBytecodeHistogram) BytecodeHistogram::reset();
  if (PrintBytecodePairHistogram) BytecodePairHistogram::reset();

  InvocationCounter::reinitialize(DelayCompilationDuringStartup);

}

// Print space-usage statistics for the generated interpreter followed by
// the individual codelets.
void AbstractInterpreter::print() {
  tty->cr();
  tty->print_cr("----------------------------------------------------------------------");
  tty->print_cr("Interpreter");
  tty->cr();
  tty->print_cr("code size = %6dK bytes", (int)_code->used_space()/1024);
  tty->print_cr("total space = %6dK bytes", (int)_code->total_space()/1024);
  tty->print_cr("wasted space = %6dK bytes", (int)_code->available_space()/1024);
  tty->cr();
  tty->print_cr("# of codelets = %6d" , _code->number_of_stubs());
  // NOTE(review): division assumes number_of_stubs() > 0 — presumably always
  // true once the interpreter has been generated; confirm before calling earlier.
  tty->print_cr("avg codelet size = %6d bytes", _code->used_space() / _code->number_of_stubs());
  tty->cr();
  _code->print();
  tty->print_cr("----------------------------------------------------------------------");
  tty->cr();
}


// VM-startup hook: generate the interpreter, then register the generated
// code range with Forte and (if requested) post a JVMTI
// DynamicCodeGenerated event for it.
void interpreter_init() {
  Interpreter::initialize();
#ifndef PRODUCT
  if (TraceBytecodes) BytecodeTracer::set_closure(BytecodeTracer::std_closure());
#endif // PRODUCT
  // need to hit every safepoint in order to call zapping routine
  // register the interpreter
  Forte::register_stub(
    "Interpreter",
    AbstractInterpreter::code()->code_start(),
    AbstractInterpreter::code()->code_end()
  );

  // notify JVMTI profiler
  if (JvmtiExport::should_post_dynamic_code_generated()) {
    JvmtiExport::post_dynamic_code_generated("Interpreter",
                                             AbstractInterpreter::code()->code_start(),
                                             AbstractInterpreter::code()->code_end());
  }
}

//------------------------------------------------------------------------------------------------------------------------
// Implementation of interpreter

StubQueue* AbstractInterpreter::_code = NULL;
bool AbstractInterpreter::_notice_safepoints = false;
address AbstractInterpreter::_rethrow_exception_entry = NULL;

address AbstractInterpreter::_native_entry_begin = NULL;
address AbstractInterpreter::_native_entry_end = NULL;
address AbstractInterpreter::_slow_signature_handler;
address AbstractInterpreter::_entry_table [AbstractInterpreter::number_of_method_entries];
address AbstractInterpreter::_native_abi_to_tosca [AbstractInterpreter::number_of_result_handlers];

//------------------------------------------------------------------------------------------------------------------------
// Generation of complete interpreter

// Note: the StubQueue parameter is not used by this base constructor;
// subclasses/callers install the queue themselves.
AbstractInterpreterGenerator::AbstractInterpreterGenerator(StubQueue* _code) {
  _masm = NULL;
}


// Result types for which the interpreter provides native-ABI-to-TOSCA
// result handlers (indexed in parallel with number_of_result_handlers).
static const BasicType types[Interpreter::number_of_result_handlers] = {
  T_BOOLEAN,
  T_CHAR ,
  T_BYTE ,
  T_SHORT ,
  T_INT ,
  T_LONG ,
  T_VOID ,
  T_FLOAT ,
  T_DOUBLE ,
  T_OBJECT
};

// Generate the platform-independent codelets (currently only the slow
// signature handler).
void AbstractInterpreterGenerator::generate_all() {


  { CodeletMark cm(_masm, "slow signature handler");
    Interpreter::_slow_signature_handler = generate_slow_signature_handler();
  }

}

//------------------------------------------------------------------------------------------------------------------------
// Entry points

// Classify a method so the interpreter can pick the matching specialized
// entry point. Test order matters — see the inline comments below.
AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m) {
  // Abstract method?
  if (m->is_abstract()) return abstract;

  // Method handle primitive?
  if (m->is_method_handle_intrinsic()) {
    vmIntrinsics::ID id = m->intrinsic_id();
    assert(MethodHandles::is_signature_polymorphic(id), "must match an intrinsic");
    // The MethodKind enum range mirrors the signature-polymorphic intrinsic
    // ID range, so the kind can be computed by offsetting.
    MethodKind kind = (MethodKind)( method_handle_invoke_FIRST +
                                    ((int)id - vmIntrinsics::FIRST_MH_SIG_POLY) );
    assert(kind <= method_handle_invoke_LAST, "parallel enum ranges");
    return kind;
  }

#ifndef CC_INTERP
  if (UseCRC32Intrinsics && m->is_native()) {
    // Use optimized stub code for CRC32 native methods.
    switch (m->intrinsic_id()) {
      case vmIntrinsics::_updateCRC32 : return java_util_zip_CRC32_update;
      case vmIntrinsics::_updateBytesCRC32 : return java_util_zip_CRC32_updateBytes;
      case vmIntrinsics::_updateByteBufferCRC32 : return java_util_zip_CRC32_updateByteBuffer;
    }
  }
#endif

  // Native method?
  // Note: This test must come _before_ the test for intrinsic
  // methods. See also comments below.
  if (m->is_native()) {
    assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out");
    return m->is_synchronized() ? native_synchronized : native;
  }

  // Synchronized?
  if (m->is_synchronized()) {
    return zerolocals_synchronized;
  }

  if (RegisterFinalizersAtInit && m->code_size() == 1 &&
      m->intrinsic_id() == vmIntrinsics::_Object_init) {
    // We need to execute the special return bytecode to check for
    // finalizer registration so create a normal frame.
    return zerolocals;
  }

  // Empty method?
  if (m->is_empty_method()) {
    return empty;
  }

  // Special intrinsic method?
  // Note: This test must come _after_ the test for native methods,
  // otherwise we will run into problems with JDK 1.2, see also
  // AbstractInterpreterGenerator::generate_method_entry() for details.
  switch (m->intrinsic_id()) {
    case vmIntrinsics::_dsin : return java_lang_math_sin ;
    case vmIntrinsics::_dcos : return java_lang_math_cos ;
    case vmIntrinsics::_dtan : return java_lang_math_tan ;
    case vmIntrinsics::_dabs : return java_lang_math_abs ;
    case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
    case vmIntrinsics::_dlog : return java_lang_math_log ;
    case vmIntrinsics::_dlog10: return java_lang_math_log10;
    case vmIntrinsics::_dpow : return java_lang_math_pow ;
    case vmIntrinsics::_dexp : return java_lang_math_exp ;

    case vmIntrinsics::_Reference_get:
      return java_lang_ref_reference_get;
  }

  // Accessor method?
  if (m->is_accessor()) {
    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
    return accessor;
  }

  // Note: for now: zero locals for all non-empty methods
  return zerolocals;
}


// Late installation of a method-handle entry point. Only legal for the MH
// invoke kinds, and only while the slot still holds the abstract-method
// (AME) entry placeholder.
void AbstractInterpreter::set_entry_for_kind(AbstractInterpreter::MethodKind kind, address entry) {
  assert(kind >= method_handle_invoke_FIRST &&
         kind <= method_handle_invoke_LAST, "late initialization only for MH entry points");
  assert(_entry_table[kind] == _entry_table[abstract], "previous value must be AME entry");
  _entry_table[kind] = entry;
}


// Return true if the interpreter can prove that the given bytecode has
// not yet been executed (in Java semantics, not in actual operation).
bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) {
  Bytecodes::Code code = method()->code_at(bci);

  if (!Bytecodes::must_rewrite(code)) {
    // might have been reached
    return false;
  }

  // the bytecode might not be rewritten if the method is an accessor, etc.
  // Only the zerolocals entries guarantee that execution goes through the
  // rewriting interpreter path.
  address ientry = method->interpreter_entry();
  if (ientry != entry_for_kind(AbstractInterpreter::zerolocals) &&
      ientry != entry_for_kind(AbstractInterpreter::zerolocals_synchronized))
    return false; // interpreter does not run this method!

  // otherwise, we can be sure this bytecode has never been executed
  return true;
}


#ifndef PRODUCT
// Debug-only: print a human-readable name for a MethodKind to tty.
void AbstractInterpreter::print_method_kind(MethodKind kind) {
  switch (kind) {
    case zerolocals : tty->print("zerolocals" ); break;
    case zerolocals_synchronized: tty->print("zerolocals_synchronized"); break;
    case native : tty->print("native" ); break;
    case native_synchronized : tty->print("native_synchronized" ); break;
    case empty : tty->print("empty" ); break;
    case accessor : tty->print("accessor" ); break;
    case abstract : tty->print("abstract" ); break;
    case java_lang_math_sin : tty->print("java_lang_math_sin" ); break;
    case java_lang_math_cos : tty->print("java_lang_math_cos" ); break;
    case java_lang_math_tan : tty->print("java_lang_math_tan" ); break;
    case java_lang_math_abs : tty->print("java_lang_math_abs" ); break;
    case java_lang_math_sqrt : tty->print("java_lang_math_sqrt" ); break;
    case java_lang_math_log : tty->print("java_lang_math_log" ); break;
    case java_lang_math_log10 : tty->print("java_lang_math_log10" ); break;
    case java_util_zip_CRC32_update : tty->print("java_util_zip_CRC32_update"); break;
    case java_util_zip_CRC32_updateBytes : tty->print("java_util_zip_CRC32_updateBytes"); break;
    case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break;
    default:
      if (kind >= method_handle_invoke_FIRST &&
          kind <= method_handle_invoke_LAST) {
        const char* kind_name = vmIntrinsics::name_at(method_handle_intrinsic(kind));
        if (kind_name[0] == '_') kind_name = &kind_name[1]; // '_invokeExact' => 'invokeExact'
        tty->print("method_handle_%s", kind_name);
        break;
      }
      ShouldNotReachHere();
      break;
  }
}
#endif // PRODUCT


//------------------------------------------------------------------------------------------------------------------------
// Deoptimization support

/**
 * If a deoptimization happens, this function returns the point of next bytecode to continue execution.
 */
address AbstractInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) {
  assert(method->contains(bcp), "just checkin'");

  // Get the original and rewritten bytecode.
  Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
  assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");

  const int bci = method->bci_from(bcp);

  // compute continuation length
  const int length = Bytecodes::length_at(method, bcp);

  // compute result type
  BasicType type = T_ILLEGAL;

  switch (code) {
    case Bytecodes::_invokevirtual :
    case Bytecodes::_invokespecial :
    case Bytecodes::_invokestatic :
    case Bytecodes::_invokeinterface: {
      Thread *thread = Thread::current();
      ResourceMark rm(thread);
      methodHandle mh(thread, method);
      type = Bytecode_invoke(mh, bci).result_type();
      // since the cache entry might not be initialized:
      // (NOT needed for the old calling convention)
      if (!is_top_frame) {
        // rewritten invokes store a native-order u2 cp-cache index after the opcode
        int index = Bytes::get_native_u2(bcp+1);
        method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
      }
      break;
    }

    case Bytecodes::_invokedynamic: {
      Thread *thread = Thread::current();
      ResourceMark rm(thread);
      methodHandle mh(thread, method);
      type = Bytecode_invoke(mh, bci).result_type();
      // since the cache entry might not be initialized:
      // (NOT needed for the old calling convention)
      if (!is_top_frame) {
        // invokedynamic uses a native-order u4 index after the opcode
        int index = Bytes::get_native_u4(bcp+1);
        method->constants()->invokedynamic_cp_cache_entry_at(index)->set_parameter_size(callee_parameters);
      }
      break;
    }

    case Bytecodes::_ldc :
    case Bytecodes::_ldc_w : // fall through
    case Bytecodes::_ldc2_w:
      {
        Thread *thread = Thread::current();
        ResourceMark rm(thread);
        methodHandle mh(thread, method);
        type = Bytecode_loadconstant(mh, bci).result_type();
        break;
      }

    default:
      type = Bytecodes::result_type(code);
      break;
  }

  // return entry point for computed continuation state & bytecode length
  return
    is_top_frame
    ? Interpreter::deopt_entry (as_TosState(type), length)
    : Interpreter::return_entry(as_TosState(type), length, code);
}

// If deoptimization happens, this function returns the point where the interpreter reexecutes
// the bytecode.
// Note: Bytecodes::_athrow is a special case in that it does not return
//       Interpreter::deopt_entry(vtos, 0) like others
address AbstractInterpreter::deopt_reexecute_entry(Method* method, address bcp) {
  assert(method->contains(bcp), "just checkin'");
  Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
#ifdef COMPILER1
  if(code == Bytecodes::_athrow ) {
    return Interpreter::rethrow_exception_entry();
  }
#endif /* COMPILER1 */
  return Interpreter::deopt_entry(vtos, 0);
}

// If deoptimization happens, the interpreter should reexecute these bytecodes.
// This function mainly helps the compilers to set up the reexecute bit.
// Returns true for bytecodes that must be re-executed (rather than continued
// after) when a deoptimization lands on them; used by the compilers to set
// the reexecute bit in debug info.
bool AbstractInterpreter::bytecode_should_reexecute(Bytecodes::Code code) {
  switch (code) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
    case Bytecodes::_fast_binaryswitch:
    case Bytecodes::_fast_linearswitch:
    // recompute conditional expression folded into _if<cond>
    case Bytecodes::_lcmp :
    case Bytecodes::_fcmpl :
    case Bytecodes::_fcmpg :
    case Bytecodes::_dcmpl :
    case Bytecodes::_dcmpg :
    case Bytecodes::_ifnull :
    case Bytecodes::_ifnonnull :
    case Bytecodes::_goto :
    case Bytecodes::_goto_w :
    case Bytecodes::_ifeq :
    case Bytecodes::_ifne :
    case Bytecodes::_iflt :
    case Bytecodes::_ifge :
    case Bytecodes::_ifgt :
    case Bytecodes::_ifle :
    case Bytecodes::_if_icmpeq :
    case Bytecodes::_if_icmpne :
    case Bytecodes::_if_icmplt :
    case Bytecodes::_if_icmpge :
    case Bytecodes::_if_icmpgt :
    case Bytecodes::_if_icmple :
    case Bytecodes::_if_acmpeq :
    case Bytecodes::_if_acmpne :
    // special cases
    case Bytecodes::_getfield :
    case Bytecodes::_putfield :
    case Bytecodes::_getstatic :
    case Bytecodes::_putstatic :
    case Bytecodes::_aastore :
#ifdef COMPILER1
    // special case of reexecution
    case Bytecodes::_athrow :
#endif
      return true;

    default:
      return false;
  }
}

// Touch ("bang") every page of the stack shadow zone so a stack overflow is
// detected via a trap now, while a valid interpreter frame exists, rather
// than later at an unrecoverable point.
void AbstractInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    // native calls only need the last shadow page banged; Java calls bang
    // every page from 1 through StackShadowPages
    const int start_page = native_call ? StackShadowPages : 1;
    const int page_size = os::vm_page_size();
    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Pre-fill all method-handle invoke entries with the abstract-method entry;
// set_entry_for_kind() later asserts this placeholder before replacing it.
void AbstractInterpreterGenerator::initialize_method_handle_entries() {
  // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
  for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) {
    Interpreter::MethodKind kind = (Interpreter::MethodKind) i;
    Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
  }
}