/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"

// Shorthand used by generator code: emit through the current macro assembler.
# define __ _masm->


//------------------------------------------------------------------------------------------------------------------------
// Implementation of InterpreterCodelet

// Record the codelet's human-readable description and associated bytecode
// (Bytecodes::_illegal-style sentinel when the codelet is not bytecode-specific
// — presumably; confirm against callers).
void InterpreterCodelet::initialize(const char* description, Bytecodes::Code bytecode) {
  _description = description;
  _bytecode = bytecode;
}


// No invariants are checked yet; placeholder for debug verification.
void InterpreterCodelet::verify() {
}


// Print a one-line summary of this codelet (description, bytecode, address
// range and size); with -XX:+PrintInterpreter also disassemble its code.
void InterpreterCodelet::print_on(outputStream* st) const {
  ttyLocker ttyl;  // keep the multi-line output from interleaving with other threads

  if (PrintInterpreter) {
    st->cr();
    st->print_cr("----------------------------------------------------------------------");
  }

  if (description() != NULL) st->print("%s ", description());
  if (bytecode() >= 0 ) st->print("%d %s ", bytecode(), Bytecodes::name(bytecode()));
  st->print_cr("[" INTPTR_FORMAT ", " INTPTR_FORMAT "] %d bytes",
               p2i(code_begin()), p2i(code_end()), code_size());

  if (PrintInterpreter) {
    st->cr();
    // In debug builds the codelet carries code strings; release builds pass an empty set.
    Disassembler::decode(code_begin(), code_end(), st, DEBUG_ONLY(_strings) NOT_DEBUG(CodeStrings()));
  }
}

// CodeletMark: scoped helper that reserves space in the interpreter's
// StubQueue, hands out a fresh InterpreterMacroAssembler for generation,
// and commits the generated code on destruction.
CodeletMark::CodeletMark(InterpreterMacroAssembler*& masm,
                         const char* description,
                         Bytecodes::Code bytecode) :
  _clet((InterpreterCodelet*)AbstractInterpreter::code()->request(codelet_size())),
  _cb(_clet->code_begin(), _clet->code_size()) {
  // Request all space (add some slack for Codelet data).
  assert(_clet != NULL, "we checked not enough space already");

  // Initialize Codelet attributes.
  _clet->initialize(description, bytecode);
  // Create assembler for code generation.
  masm = new InterpreterMacroAssembler(&_cb);
  _masm = &masm;
}

CodeletMark::~CodeletMark() {
  // Align so printing shows nop's instead of random code at the end (Codelets are aligned).
  (*_masm)->align(wordSize);
  // Make sure all code is in code buffer.
  (*_masm)->flush();

  // Commit Codelet.
  AbstractInterpreter::code()->commit((*_masm)->code()->pure_insts_size(), (*_masm)->code()->strings());
  // Make sure nobody can use _masm outside a CodeletMark lifespan.
  *_masm = NULL;
}

//------------------------------------------------------------------------------------------------------------------------
// Implementation of platform independent aspects of Interpreter

// One-time initialization of interpreter-wide counters and histograms;
// a non-NULL _code means we already initialized, so return immediately.
void AbstractInterpreter::initialize() {
  if (_code != NULL) return;

  // make sure 'imported' classes are initialized
  if (CountBytecodes || TraceBytecodes || StopInterpreterAt) BytecodeCounter::reset();
  if (PrintBytecodeHistogram) BytecodeHistogram::reset();
  if (PrintBytecodePairHistogram) BytecodePairHistogram::reset();

  InvocationCounter::reinitialize(DelayCompilationDuringStartup);

}

// Dump space usage statistics for the interpreter's StubQueue to tty,
// followed by the individual codelets.
void AbstractInterpreter::print() {
  tty->cr();
  tty->print_cr("----------------------------------------------------------------------");
  tty->print_cr("Interpreter");
  tty->cr();
  tty->print_cr("code size = %6dK bytes", (int)_code->used_space()/1024);
  tty->print_cr("total space = %6dK bytes", (int)_code->total_space()/1024);
  tty->print_cr("wasted space = %6dK bytes", (int)_code->available_space()/1024);
  tty->cr();
  tty->print_cr("# of codelets = %6d" , _code->number_of_stubs());
  // Guard the division: the queue may be empty.
  if (_code->number_of_stubs() != 0) {
    tty->print_cr("avg codelet size = %6d bytes", _code->used_space() / _code->number_of_stubs());
    tty->cr();
  }
  _code->print();
  tty->print_cr("----------------------------------------------------------------------");
  tty->cr();
}


// VM bootstrap hook: generate the interpreter, then register the generated
// code range with Forte and (if requested) post a JVMTI dynamic-code event.
void interpreter_init() {
  Interpreter::initialize();
#ifndef PRODUCT
  if (TraceBytecodes) BytecodeTracer::set_closure(BytecodeTracer::std_closure());
#endif // PRODUCT
  // need to hit every safepoint in order to call zapping routine
  // register the interpreter
  Forte::register_stub(
    "Interpreter",
    AbstractInterpreter::code()->code_start(),
    AbstractInterpreter::code()->code_end()
  );

  // notify JVMTI profiler
  if (JvmtiExport::should_post_dynamic_code_generated()) {
    JvmtiExport::post_dynamic_code_generated("Interpreter",
                                             AbstractInterpreter::code()->code_start(),
                                             AbstractInterpreter::code()->code_end());
  }
}

//------------------------------------------------------------------------------------------------------------------------
// Implementation of interpreter

StubQueue* AbstractInterpreter::_code                   = NULL;
bool       AbstractInterpreter::_notice_safepoints      = false;
address    AbstractInterpreter::_rethrow_exception_entry = NULL;

address    AbstractInterpreter::_native_entry_begin     = NULL;
address    AbstractInterpreter::_native_entry_end       = NULL;
address    AbstractInterpreter::_slow_signature_handler;
address    AbstractInterpreter::_entry_table [AbstractInterpreter::number_of_method_entries];
address    AbstractInterpreter::_native_abi_to_tosca [AbstractInterpreter::number_of_result_handlers];

//------------------------------------------------------------------------------------------------------------------------
// Generation of complete interpreter

// NOTE(review): the '_code' parameter is accepted but unused here —
// presumably retained for subclass constructors; confirm before removing.
AbstractInterpreterGenerator::AbstractInterpreterGenerator(StubQueue* _code) {
  _masm = NULL;
}


// Result types, one per interpreter result-handler slot.
static const BasicType types[Interpreter::number_of_result_handlers] = {
  T_BOOLEAN,
  T_CHAR   ,
  T_BYTE   ,
  T_SHORT  ,
  T_INT    ,
  T_LONG   ,
  T_VOID   ,
  T_FLOAT  ,
  T_DOUBLE ,
  T_OBJECT
};

// Generate the platform-independent codelets (currently only the slow
// signature handler).
void AbstractInterpreterGenerator::generate_all() {


  { CodeletMark cm(_masm, "slow signature handler");
    Interpreter::_slow_signature_handler = generate_slow_signature_handler();
  }

}

//------------------------------------------------------------------------------------------------------------------------
// Entry points

// Classify a method into the MethodKind that selects its interpreter entry.
// Order of the tests below is significant — see the inline notes.
AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m) {
  // Abstract method?
  if (m->is_abstract()) return abstract;

  // Method handle primitive?
  if (m->is_method_handle_intrinsic()) {
    vmIntrinsics::ID id = m->intrinsic_id();
    assert(MethodHandles::is_signature_polymorphic(id), "must match an intrinsic");
    // MethodKind and the MH signature-polymorphic intrinsic ids are parallel
    // enums, so the kind is computed by offsetting from the first of each.
    MethodKind kind = (MethodKind)( method_handle_invoke_FIRST +
                                    ((int)id - vmIntrinsics::FIRST_MH_SIG_POLY) );
    assert(kind <= method_handle_invoke_LAST, "parallel enum ranges");
    return kind;
  }

#ifndef CC_INTERP
  if (UseCRC32Intrinsics && m->is_native()) {
    // Use optimized stub code for CRC32 native methods.
    switch (m->intrinsic_id()) {
      case vmIntrinsics::_updateCRC32           : return java_util_zip_CRC32_update;
      case vmIntrinsics::_updateBytesCRC32      : return java_util_zip_CRC32_updateBytes;
      case vmIntrinsics::_updateByteBufferCRC32 : return java_util_zip_CRC32_updateByteBuffer;
    }
  }
  if (UseCRC32CIntrinsics) {
    // Use optimized stub code for CRC32C methods.
    switch (m->intrinsic_id()) {
      case vmIntrinsics::_updateBytesCRC32C            : return java_util_zip_CRC32C_updateBytes;
      case vmIntrinsics::_updateDirectByteBufferCRC32C : return java_util_zip_CRC32C_updateDirectByteBuffer;
    }
  }

  // Float/Double bit-conversion intrinsics get their own kinds.
  switch(m->intrinsic_id()) {
    case vmIntrinsics::_intBitsToFloat:      return java_lang_Float_intBitsToFloat;
    case vmIntrinsics::_floatToRawIntBits:   return java_lang_Float_floatToRawIntBits;
    case vmIntrinsics::_longBitsToDouble:    return java_lang_Double_longBitsToDouble;
    case vmIntrinsics::_doubleToRawLongBits: return java_lang_Double_doubleToRawLongBits;
  }

#endif // CC_INTERP

  // Native method?
  // Note: This test must come _before_ the test for intrinsic
  //       methods. See also comments below.
  if (m->is_native()) {
    assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out");
    return m->is_synchronized() ? native_synchronized : native;
  }

  // Synchronized?
  if (m->is_synchronized()) {
    return zerolocals_synchronized;
  }

  if (RegisterFinalizersAtInit && m->code_size() == 1 &&
      m->intrinsic_id() == vmIntrinsics::_Object_init) {
    // We need to execute the special return bytecode to check for
    // finalizer registration so create a normal frame.
    return zerolocals;
  }

  // Empty method?
  if (m->is_empty_method()) {
    return empty;
  }

  // Special intrinsic method?
  // Note: This test must come _after_ the test for native methods,
  //       otherwise we will run into problems with JDK 1.2, see also
  //       InterpreterGenerator::generate_method_entry() for
  //       for details.
  switch (m->intrinsic_id()) {
    case vmIntrinsics::_dsin  : return java_lang_math_sin  ;
    case vmIntrinsics::_dcos  : return java_lang_math_cos  ;
    case vmIntrinsics::_dtan  : return java_lang_math_tan  ;
    case vmIntrinsics::_dabs  : return java_lang_math_abs  ;
    case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
    case vmIntrinsics::_dlog  : return java_lang_math_log  ;
    case vmIntrinsics::_dlog10: return java_lang_math_log10;
    case vmIntrinsics::_dpow  : return java_lang_math_pow  ;
    case vmIntrinsics::_dexp  : return java_lang_math_exp  ;

    case vmIntrinsics::_Reference_get:
                                return java_lang_ref_reference_get;
  }

  // Accessor method?
  if (m->is_accessor()) {
    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
    return accessor;
  }

  // Note: for now: zero locals for all non-empty methods
  return zerolocals;
}


// Late-install the entry point for a method-handle invoke kind. The slot
// must still hold the AbstractMethodError entry installed by
// initialize_method_handle_entries() — i.e. this is one-shot initialization.
void AbstractInterpreter::set_entry_for_kind(AbstractInterpreter::MethodKind kind, address entry) {
  assert(kind >= method_handle_invoke_FIRST &&
         kind <= method_handle_invoke_LAST, "late initialization only for MH entry points");
  assert(_entry_table[kind] == _entry_table[abstract], "previous value must be AME entry");
  _entry_table[kind] = entry;
}


// Return true if the interpreter can prove that the given bytecode has
// not yet been executed (in Java semantics, not in actual operation).
bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) {
  Bytecodes::Code code = method()->code_at(bci);

  // A bytecode that is never rewritten gives us no execution evidence.
  if (!Bytecodes::must_rewrite(code)) {
    // might have been reached
    return false;
  }

  // the bytecode might not be rewritten if the method is an accessor, etc.
  address ientry = method->interpreter_entry();
  if (ientry != entry_for_kind(AbstractInterpreter::zerolocals) &&
      ientry != entry_for_kind(AbstractInterpreter::zerolocals_synchronized))
    return false; // interpreter does not run this method!
333 334 // otherwise, we can be sure this bytecode has never been executed 335 return true; 336 } 337 338 339 #ifndef PRODUCT 340 void AbstractInterpreter::print_method_kind(MethodKind kind) { 341 switch (kind) { 342 case zerolocals : tty->print("zerolocals" ); break; 343 case zerolocals_synchronized: tty->print("zerolocals_synchronized"); break; 344 case native : tty->print("native" ); break; 345 case native_synchronized : tty->print("native_synchronized" ); break; 346 case empty : tty->print("empty" ); break; 347 case accessor : tty->print("accessor" ); break; 348 case abstract : tty->print("abstract" ); break; 349 case java_lang_math_sin : tty->print("java_lang_math_sin" ); break; 350 case java_lang_math_cos : tty->print("java_lang_math_cos" ); break; 351 case java_lang_math_tan : tty->print("java_lang_math_tan" ); break; 352 case java_lang_math_abs : tty->print("java_lang_math_abs" ); break; 353 case java_lang_math_sqrt : tty->print("java_lang_math_sqrt" ); break; 354 case java_lang_math_log : tty->print("java_lang_math_log" ); break; 355 case java_lang_math_log10 : tty->print("java_lang_math_log10" ); break; 356 case java_util_zip_CRC32_update : tty->print("java_util_zip_CRC32_update"); break; 357 case java_util_zip_CRC32_updateBytes : tty->print("java_util_zip_CRC32_updateBytes"); break; 358 case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break; 359 case java_util_zip_CRC32C_updateBytes : tty->print("java_util_zip_CRC32C_updateBytes"); break; 360 case java_util_zip_CRC32C_updateDirectByteBuffer: tty->print("java_util_zip_CRC32C_updateDirectByteByffer"); break; 361 default: 362 if (kind >= method_handle_invoke_FIRST && 363 kind <= method_handle_invoke_LAST) { 364 const char* kind_name = vmIntrinsics::name_at(method_handle_intrinsic(kind)); 365 if (kind_name[0] == '_') kind_name = &kind_name[1]; // '_invokeExact' => 'invokeExact' 366 tty->print("method_handle_%s", kind_name); 367 break; 368 } 369 
ShouldNotReachHere(); 370 break; 371 } 372 } 373 #endif // PRODUCT 374 375 376 //------------------------------------------------------------------------------------------------------------------------ 377 // Deoptimization support 378 379 /** 380 * If a deoptimization happens, this function returns the point of next bytecode to continue execution. 381 */ 382 address AbstractInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) { 383 assert(method->contains(bcp), "just checkin'"); 384 385 // Get the original and rewritten bytecode. 386 Bytecodes::Code code = Bytecodes::java_code_at(method, bcp); 387 assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute"); 388 389 const int bci = method->bci_from(bcp); 390 391 // compute continuation length 392 const int length = Bytecodes::length_at(method, bcp); 393 394 // compute result type 395 BasicType type = T_ILLEGAL; 396 397 switch (code) { 398 case Bytecodes::_invokevirtual : 399 case Bytecodes::_invokespecial : 400 case Bytecodes::_invokestatic : 401 case Bytecodes::_invokeinterface: { 402 Thread *thread = Thread::current(); 403 ResourceMark rm(thread); 404 methodHandle mh(thread, method); 405 type = Bytecode_invoke(mh, bci).result_type(); 406 // since the cache entry might not be initialized: 407 // (NOT needed for the old calling convension) 408 if (!is_top_frame) { 409 int index = Bytes::get_native_u2(bcp+1); 410 method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters); 411 } 412 break; 413 } 414 415 case Bytecodes::_invokedynamic: { 416 Thread *thread = Thread::current(); 417 ResourceMark rm(thread); 418 methodHandle mh(thread, method); 419 type = Bytecode_invoke(mh, bci).result_type(); 420 // since the cache entry might not be initialized: 421 // (NOT needed for the old calling convension) 422 if (!is_top_frame) { 423 int index = Bytes::get_native_u4(bcp+1); 424 
        method->constants()->invokedynamic_cp_cache_entry_at(index)->set_parameter_size(callee_parameters);
      }
      break;
    }

    case Bytecodes::_ldc   :
    case Bytecodes::_ldc_w : // fall through
    case Bytecodes::_ldc2_w:
      {
        Thread *thread = Thread::current();
        ResourceMark rm(thread);
        methodHandle mh(thread, method);
        // Continuation state depends on the type of the loaded constant.
        type = Bytecode_loadconstant(mh, bci).result_type();
        break;
      }

    default:
      type = Bytecodes::result_type(code);
      break;
  }

  // return entry point for computed continuation state & bytecode length
  return
    is_top_frame
    ? Interpreter::deopt_entry (as_TosState(type), length)
    : Interpreter::return_entry(as_TosState(type), length, code);
}

// If deoptimization happens, this function returns the point where the interpreter reexecutes
// the bytecode.
// Note: Bytecodes::_athrow is a special case in that it does not return
//       Interpreter::deopt_entry(vtos, 0) like others
address AbstractInterpreter::deopt_reexecute_entry(Method* method, address bcp) {
  assert(method->contains(bcp), "just checkin'");
  Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
#ifdef COMPILER1
  // C1 reexecutes _athrow through the rethrow-exception entry.
  if(code == Bytecodes::_athrow ) {
    return Interpreter::rethrow_exception_entry();
  }
#endif /* COMPILER1 */
  return Interpreter::deopt_entry(vtos, 0);
}

// If deoptimization happens, the interpreter should reexecute these bytecodes.
// This function mainly helps the compilers to set up the reexecute bit.
bool AbstractInterpreter::bytecode_should_reexecute(Bytecodes::Code code) {
  switch (code) {
    // Switch bytecodes must be reexecuted to recompute the dispatch target.
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
    case Bytecodes::_fast_binaryswitch:
    case Bytecodes::_fast_linearswitch:
    // recompute condtional expression folded into _if<cond>
    case Bytecodes::_lcmp      :
    case Bytecodes::_fcmpl     :
    case Bytecodes::_fcmpg     :
    case Bytecodes::_dcmpl     :
    case Bytecodes::_dcmpg     :
    case Bytecodes::_ifnull    :
    case Bytecodes::_ifnonnull :
    case Bytecodes::_goto      :
    case Bytecodes::_goto_w    :
    case Bytecodes::_ifeq      :
    case Bytecodes::_ifne      :
    case Bytecodes::_iflt      :
    case Bytecodes::_ifge      :
    case Bytecodes::_ifgt      :
    case Bytecodes::_ifle      :
    case Bytecodes::_if_icmpeq :
    case Bytecodes::_if_icmpne :
    case Bytecodes::_if_icmplt :
    case Bytecodes::_if_icmpge :
    case Bytecodes::_if_icmpgt :
    case Bytecodes::_if_icmple :
    case Bytecodes::_if_acmpeq :
    case Bytecodes::_if_acmpne :
    // special cases
    case Bytecodes::_getfield  :
    case Bytecodes::_putfield  :
    case Bytecodes::_getstatic :
    case Bytecodes::_putstatic :
    case Bytecodes::_aastore   :
#ifdef COMPILER1
    //special case of reexecution
    case Bytecodes::_athrow    :
#endif
      return true;

    default:
      return false;
  }
}

// Stack-overflow check: touch ("bang") each page of the stack shadow zone so
// an overflow traps here, where a valid interpreter frame already exists,
// rather than in arbitrary generated code.
void AbstractInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiever, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone.
  // We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    // Native calls only need the last shadow page banged; interpreted
    // frames may have more than a page of locals, so bang every page.
    const int start_page = native_call ? StackShadowPages : 1;
    const int page_size = os::vm_page_size();
    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Pre-fill every method-handle invoke slot with the AbstractMethodError
// (abstract) entry; the real entries are installed later (see the note below)
// via set_entry_for_kind().
void AbstractInterpreterGenerator::initialize_method_handle_entries() {
  // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
  for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) {
    Interpreter::MethodKind kind = (Interpreter::MethodKind) i;
    Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
  }
}

// Generate method entries
// Returns the specialized entry point for 'kind', or NULL-falls-through to
// the normal (zerolocals) entry, synchronized or not, when no specialized
// stub applies.
address InterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;

  switch (kind) {
  case Interpreter::zerolocals             :                                             break;
  case Interpreter::zerolocals_synchronized: synchronized = true;                        break;
  case Interpreter::native                 : entry_point = generate_native_entry(false); break;
  case Interpreter::native_synchronized    : entry_point = generate_native_entry(true);  break;
  case Interpreter::empty                  : entry_point = generate_empty_entry();       break;
  case Interpreter::accessor               : entry_point = generate_accessor_entry();    break;
  case Interpreter::abstract               : entry_point = generate_abstract_entry();    break;

  case Interpreter::java_lang_math_sin     : // fall thru
  case Interpreter::java_lang_math_cos     : // fall thru
  case Interpreter::java_lang_math_tan     : // fall thru
  case Interpreter::java_lang_math_abs     : // fall thru
  case Interpreter::java_lang_math_log     : // fall thru
  case Interpreter::java_lang_math_log10   : // fall thru
  case Interpreter::java_lang_math_sqrt    : // fall thru
  case Interpreter::java_lang_math_pow     : // fall thru
  case Interpreter::java_lang_math_exp     : entry_point = generate_math_entry(kind); break;
  case Interpreter::java_lang_ref_reference_get
                                           : entry_point = generate_Reference_get_entry(); break;
#ifndef CC_INTERP
  case Interpreter::java_util_zip_CRC32_update
                                           : entry_point = generate_CRC32_update_entry(); break;
  case Interpreter::java_util_zip_CRC32_updateBytes
                                           : // fall thru
  case Interpreter::java_util_zip_CRC32_updateByteBuffer
                                           : entry_point = generate_CRC32_updateBytes_entry(kind); break;
#if defined(TARGET_ARCH_x86) && !defined(_LP64)
  // On x86_32 platforms, a special entry is generated for the following four methods.
  // On other platforms the normal entry is used to enter these methods.
  case Interpreter::java_lang_Float_intBitsToFloat
                                           : entry_point = generate_Float_intBitsToFloat_entry(); break;
  case Interpreter::java_lang_Float_floatToRawIntBits
                                           : entry_point = generate_Float_floatToRawIntBits_entry(); break;
  case Interpreter::java_lang_Double_longBitsToDouble
                                           : entry_point = generate_Double_longBitsToDouble_entry(); break;
  case Interpreter::java_lang_Double_doubleToRawLongBits
                                           : entry_point = generate_Double_doubleToRawLongBits_entry(); break;
#else
  case Interpreter::java_lang_Float_intBitsToFloat:
  case Interpreter::java_lang_Float_floatToRawIntBits:
  case Interpreter::java_lang_Double_longBitsToDouble:
  case Interpreter::java_lang_Double_doubleToRawLongBits:
    entry_point = generate_native_entry(false);
    break;
#endif // defined(TARGET_ARCH_x86) && !defined(_LP64)
#endif // CC_INTERP
  default:
    fatal(err_msg("unexpected method kind: %d", kind));
    break;
  }

  if (entry_point) {
    return entry_point;
  }

  return generate_normal_entry(synchronized);
}