/*
 * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 180 * 1024;

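// A quick way to check the required size (exact flag availability may vary
// by build; PrintInterpreter is a diagnostic flag in recent JDKs):
//   java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version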
#define __ _masm->

//------------------------------------------------------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // callee-save register for saving LR, shared with generate_native_entry
  const Register Rsaved_ret_addr = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0);

  __ mov(Rsaved_ret_addr, LR);

  __ mov(R1, Rmethod);
  __ mov(R2, Rlocals);
  __ mov(R3, SP);

#ifdef AARCH64
  // expand expr. stack and extend SP to avoid cutting SP in call_VM
  __ mov(Rstack_top, SP);
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ check_stack_top();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), R1, R2, R3, false);

  __ ldp(ZR,      c_rarg1, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg2, c_rarg3, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg4, c_rarg5, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg6, c_rarg7, Address(SP, 2*wordSize, post_indexed));

  __ ldp_d(V0, V1, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V2, V3, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V4, V5, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V6, V7, Address(SP, 2*wordSize, post_indexed));
#else

  // Safer to save R9 (when scratched) since callers may have been
  // written assuming R9 survives. This is suboptimal but
  // probably not important for this slow case call site.
  // Note for R9 saving: slow_signature_handler may copy register
  // arguments above the current SP (passed as R3). It is safe for
  // call_VM to use push and pop to protect additional values on the
  // stack if needed.
  __ call_VM(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), true /* save R9 if needed*/);
  __ add(SP, SP, wordSize);     // Skip R0
  __ pop(RegisterSet(R1, R3));  // Load arguments passed in registers
#ifdef __ABI_HARD__
  // A few alternatives to an always-load-FP-registers approach:
  // - parse the method signature to detect FP arguments;
  // - keep a counter/flag on the stack indicating the number of FP arguments in the method.
  // The latter was originally implemented and tested, but a conditional path could
  // eliminate any gain brought by avoiding the 8 double-word loads.
  __ fldmiad(SP, FloatRegisterSet(D0, 8), writeback);
#endif // __ABI_HARD__
#endif // AARCH64

  __ ret(Rsaved_ret_addr);

  return entry;
}

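// Note: the signature handler (this slow one included) is expected to leave
// the result handler address in R0; generate_native_entry copies it into
// Rresult_handler right after calling the handler.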

//
// Various method entries (that c++ and asm interpreter agree upon)
//------------------------------------------------------------------------------------------------------------------------
//
//

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry_point = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
  __ restore_stack_top();
#endif

  __ empty_expression_stack();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  DEBUG_ONLY(STOP("generate_abstract_entry");) // Should not reach here
  return entry_point;
}

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  // TODO: ARM
  return NULL;

  // Note: the code below is dead until math intrinsics are implemented for
  // this port (the early return above always takes effect).
  address entry_point = __ pc();
  STOP("generate_math_entry");
  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
  //
#ifdef ASSERT
  { Label L;
    __ sub(Rtemp, FP, - frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ cmp(SP, Rtemp);  // Rtemp = maximal SP for current FP,
                        // (stack grows negative)
    __ b(L, ls); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT

  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();

  // index is in R4_ArrayIndexOutOfBounds_index

  InlinedString Lname(name);

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  __ ldr_literal(R1, Lname);
  __ mov(R2, R4_ArrayIndexOutOfBounds_index);

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2);

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ should_not_reach_here();
  __ bind_literal(Lname);

  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is in R2_ClassCastException_obj

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  __ mov(R1, R2_ClassCastException_obj);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             R1);

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  InlinedString Lname(name);
  InlinedString Lmessage(message);

  if (pass_oop) {
    // object is at TOS
    __ pop_ptr(R2);
  }

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  __ ldr_literal(R1, Lname);

  if (pass_oop) {
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), R1, R2);
  } else {
    if (message != NULL) {
      __ ldr_literal(R2, Lmessage);
    } else {
      __ mov(R2, 0);
    }
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), R1, R2);
  }

  // throw exception
  __ b(Interpreter::throw_exception_entry());

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ bind_literal(Lname);
  if (!pass_oop && (message != NULL)) {
    __ bind_literal(Lmessage);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);  // Restore SP to extended SP
  __ restore_stack_top();
#else
  // Restore stack bottom in case i2c adjusted stack
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that SP is now tos until next java call
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  const Register Rcache = R2_tmp;
  const Register Rindex = R3_tmp;
  __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);

  __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
  __ ldrb(Rtemp, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ check_stack_top();
  __ add(Rstack_top, Rstack_top, AsmOperand(Rtemp, lsl, Interpreter::logStackElementSize));

#ifndef AARCH64
  __ convert_retval_to_tos(state);
#endif // !AARCH64

  __ check_and_handle_popframe();
  __ check_and_handle_earlyret();

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);  // Restore SP to extended SP
  __ restore_stack_top();
#else
  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  // handle exceptions
  { Label L;
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
    __ cbz(Rtemp, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
#ifdef AARCH64
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN:
      __ tst(R0, 0xff);
      __ cset(R0, ne);
      break;
    case T_CHAR   : __ zero_extend(R0, R0, 16);  break;
    case T_BYTE   : __ sign_extend(R0, R0,  8);  break;
    case T_SHORT  : __ sign_extend(R0, R0, 16);  break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : // fall through
    case T_FLOAT  : // fall through
    case T_DOUBLE : /* nothing to do */          break;
    case T_OBJECT :
      // retrieve result from frame
      __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
      // and verify it
      __ verify_oop(R0);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();
  return entry;
#else
  // Result handlers are not used on 32-bit ARM
  // since the returned value is already in appropriate format.
  __ should_not_reach_here();  // to avoid empty code block

  // A non-zero result handler indicates that an object is returned; this is
  // used in the native entry code.
  return type == T_OBJECT ? (address)(-1) : NULL;
#endif // AARCH64
}
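// Note on the 32-bit convention above: generate_native_entry only tests the
// result handler against zero (cmp(Rresult_handler, 0)), so any non-zero
// value works as the "returns an oop" marker; (address)-1 is simply a
// recognizable sentinel, not a callable address.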

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);

  // load current bytecode
  __ ldrb(R3_bytecode, Address(Rbcp));
  __ dispatch_only_normal(vtos);
  return entry;
}


// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// In: Rmethod.
//
// Uses R0, R1, Rtemp.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow,
                                                         Label* profile_method,
                                                         Label* profile_method_continue) {
  Label done;
  const Register Rcounters = Rtemp;
  const Address invocation_counter(Rcounters,
                MethodCounters::invocation_counter_offset() +
                InvocationCounter::counter_offset());

  // Note: In tiered we increment either counters in MethodCounters* or
  // in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset()));
      __ cbz(R1_tmp, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(R1_tmp,
                    in_bytes(MethodData::invocation_counter_offset()) +
                    in_bytes(InvocationCounter::counter_offset()));
      const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    __ get_method_counters(Rmethod, Rcounters, done);
    const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(Rcounters,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());

    const Register Ricnt = R0_tmp;  // invocation counter
    const Register Rbcnt = R1_tmp;  // backedge counter

    __ get_method_counters(Rmethod, Rcounters, done);

    if (ProfileInterpreter) {
      const Register Riic = R1_tmp;
      __ ldr_s32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
      __ add(Riic, Riic, 1);
      __ str_32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
    }

    // Update standard invocation counters

    __ ldr_u32(Ricnt, invocation_counter);
    __ ldr_u32(Rbcnt, backedge_counter);

    __ add(Ricnt, Ricnt, InvocationCounter::count_increment);

#ifdef AARCH64
    __ andr(Rbcnt, Rbcnt, (unsigned int)InvocationCounter::count_mask_value); // mask out the status bits
#else
    __ bic(Rbcnt, Rbcnt, ~InvocationCounter::count_mask_value); // mask out the status bits
#endif // AARCH64

    __ str_32(Ricnt, invocation_counter);  // save invocation count
    __ add(Ricnt, Ricnt, Rbcnt);           // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call.
    // BytecodeInterpreter only calls this for native methods, so that code is elided.

    if (ProfileInterpreter && profile_method != NULL) {
      assert(profile_method_continue != NULL, "should be non-null");

      // Test to see if we should create a method data oop
      // Reuse R1_tmp as we don't need backedge counters anymore.
      Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ldr_s32(R1_tmp, profile_limit);
      __ cmp_32(Ricnt, R1_tmp);
      __ b(*profile_method_continue, lt);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(R1_tmp, *profile_method);
    }

    Address invoke_limit(Rcounters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ldr_s32(R1_tmp, invoke_limit);
    __ cmp_32(Ricnt, R1_tmp);
    __ b(*overflow, hs);
    __ bind(done);
  }
}
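// Rough shape of the tiered path above (see InvocationCounter and
// InterpreterMacroAssembler::increment_mask_and_jump): the count occupies
// the upper bits of the 32-bit counter word, so a single masked test after
// the add detects a crossing of the notification threshold:
//   counter += count_increment;
//   if ((counter & mask) == 0) goto *overflow;  // notify compilation policy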

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ mov(R1, (int)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);

  // jump to the interpreted entry.
  __ b(do_continue);
}

void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
  // Check if we've got enough room on the stack for
  //  - overhead;
  //  - locals;
  //  - expression stack.
  //
  // Registers on entry:
  //
  // R3 = number of additional locals
  // R11 = max expression stack slots (AArch64 only)
  // Rthread
  // Rmethod
  // Registers used: R0, R1, R2, Rtemp.

  const Register Radditional_locals = R3;
  const Register RmaxStack = AARCH64_ONLY(R11) NOT_AARCH64(R2);

  // monitor entry size
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved registers, thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size;

  // Pages reserved for VM runtime calls and subsequent Java calls.
  const int reserved_pages = JavaThread::stack_shadow_zone_size();

  // Thread::stack_size() includes guard pages, and they should not be touched.
  const int guard_pages = JavaThread::stack_guard_zone_size();
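  // The comparison below effectively checks:
  //   stack_limit = stack_base - stack_size;          // lowest usable address
  //   needed_sp   = SP - (overhead_size + reserved_pages + guard_pages
  //                       + Method::extra_stack_words()
  //                       + additional_locals_bytes + max_stack_bytes);
  //   if (needed_sp <= stack_limit) throw StackOverflowError;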

  __ ldr(R0, Address(Rthread, Thread::stack_base_offset()));
  __ ldr(R1, Address(Rthread, Thread::stack_size_offset()));
#ifndef AARCH64
  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldrh(RmaxStack, Address(Rtemp, ConstMethod::max_stack_offset()));
#endif // !AARCH64
  __ sub_slow(Rtemp, SP, overhead_size + reserved_pages + guard_pages + Method::extra_stack_words());

  // reserve space for additional locals
  __ sub(Rtemp, Rtemp, AsmOperand(Radditional_locals, lsl, Interpreter::logStackElementSize));

  // stack size
  __ sub(R0, R0, R1);

  // reserve space for expression stack
  __ sub(Rtemp, Rtemp, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));

  __ cmp(Rtemp, R0);

#ifdef AARCH64
  Label L;
  __ b(L, hi);
  __ mov(SP, Rsender_sp);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry());
  __ bind(L);
#else
  __ mov(SP, Rsender_sp, ls);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry(), ls);
#endif // AARCH64
}


// Allocate monitor and lock method (asm interpreter)
//
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method

  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  assert ((entry_size % StackAlignmentInBytes) == 0, "should keep stack alignment");

#ifdef ASSERT
  { Label L;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ tbnz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  { Label done;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
#ifdef AARCH64
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, done);
#else
    __ tst(Rtemp, JVM_ACC_STATIC);
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0)), eq); // get receiver (assume this is frequent case)
    __ b(done, eq);
#endif // AARCH64
    __ load_mirror(R0, Rmethod, Rtemp);
    __ bind(done);
  }

  // add space for monitor & lock

#ifdef AARCH64
  __ check_extended_sp(Rtemp);
  __ sub(SP, SP, entry_size);     // adjust extended SP
  __ mov(Rtemp, SP);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
#endif // AARCH64

  __ sub(Rstack_top, Rstack_top, entry_size);      // add space for a monitor entry
  __ check_stack_top_on_expansion();
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                                   // set new monitor block top
  __ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset_in_bytes())); // store object
  __ mov(R1, Rstack_top);                          // monitor entry address
  __ lock_object(R1);
}
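// Layout note (a sketch, based on the code above): each monitor entry is a
// BasicObjectLock { BasicLock lock; oop obj; } carved out of the expression
// stack area; Rstack_top is bumped down by entry_size, the owned object is
// stored at obj_offset_in_bytes(), and lock_object() fills in the lock word.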

#ifdef AARCH64

//
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
// and for native methods hence the shared code.
//
// On entry:
//   R10 = ConstMethod
//   R11 = max expr. stack (in slots), if !native_call
//
// On exit:
//   Rbcp, Rstack_top are initialized, SP is extended
//
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Incoming registers
  const Register RconstMethod = R10;
  const Register RmaxStack    = R11;
  // Temporary registers
  const Register RextendedSP  = R0;
  const Register Rcache       = R1;
  const Register Rmdp         = ProfileInterpreter ? R2 : ZR;

  // Generates the following stack layout (stack grows up in this picture):
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ mirror             ]
  // [ Method*            ]
  // [ extended SP        ]
  // [ expr. stack top    ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ stp(FP, LR, Address(SP, -2*wordSize, pre_indexed));
  __ mov(FP, SP);                                     // establish new FP

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, ZR);                                 // bcp = 0 for native calls
  } else {
    __ add(Rbcp, RconstMethod, in_bytes(ConstMethod::codes_offset())); // get codebase
  }

  // Rstack_top & RextendedSP
  __ sub(Rstack_top, SP, 10*wordSize);
  if (native_call) {
    __ sub(RextendedSP, Rstack_top, align_up(wordSize, StackAlignmentInBytes)); // reserve 1 slot for exception handling
  } else {
    __ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));
    __ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes);
  }
  __ mov(SP, RextendedSP);
  __ check_stack_top();

  // Load Rmdp
  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()));
    __ csel(Rmdp, ZR, Rtemp, eq);
  }

  // Load Rcache
  __ ldr(Rtemp, Address(RconstMethod, ConstMethod::constants_offset()));
  __ ldr(Rcache, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);

  // Build fixed frame
  __ stp(Rstack_top, Rbcp,        Address(FP, -10*wordSize));
  __ stp(Rlocals,    Rcache,      Address(FP,  -8*wordSize));
  __ stp(Rmdp,       Rtemp,       Address(FP,  -6*wordSize));
  __ stp(Rmethod,    RextendedSP, Address(FP,  -4*wordSize));
  __ stp(ZR,         Rsender_sp,  Address(FP,  -2*wordSize));
  assert(frame::interpreter_frame_initial_sp_offset == -10, "interpreter frame broken");
  assert(frame::interpreter_frame_stack_top_offset  == -2, "stack top broken");
}
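// One way to read the stp sequence above (word offsets relative to FP):
//   [FP-10] expr. stack bottom (Rstack_top)   [FP-9] saved Rbcp
//   [FP-8]  Rlocals                           [FP-7] cp cache
//   [FP-6]  mdp                               [FP-5] mirror
//   [FP-4]  Method*                           [FP-3] extended SP
//   [FP-2]  stack top (zeroed)                [FP-1] sender_sp
//   [FP]    saved FP                          [FP+1] saved LR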

#else // AARCH64

//
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
// and for native methods hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Generates the following stack layout:
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ mirror             ]
  // [ Method*            ]
  // [ last_sp            ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ push(LR);                                        // save return address
  __ push(FP);                                        // save FP
  __ mov(FP, SP);                                     // establish new FP

  __ push(Rsender_sp);

  __ mov(R0, 0);
  __ push(R0);                                        // leave last_sp as null

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, 0);                                  // bcp = 0 for native calls
  } else {
    __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); // get ConstMethod*
    __ add(Rbcp, Rtemp, ConstMethod::codes_offset());        // get codebase
  }

  __ push(Rmethod);                                   // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);
  __ push(Rtemp);

  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()), ne);
    __ push(Rtemp);                                   // set the mdp (method data pointer)
  } else {
    __ push(R0);
  }

  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  __ push(Rtemp);                                     // set constant pool cache
  __ push(Rlocals);                                   // set locals pointer
  __ push(Rbcp);                                      // set bcp
  __ push(R0);                                        // reserve word for pointer to expression stack bottom
  __ str(SP, Address(SP, 0));                         // set expression stack bottom
}

#endif // AARCH64

// End of helpers

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized versions but
// the frame layout they create is very similar. The other method entry
// types are really just special-purpose entries that combine entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
//   Rmethod: Method*
//   Rthread: thread
//   Rsender_sp: sender sp
//   Rparams (SP on 32-bit ARM): pointer to method parameters
//
//   LR: return address
//
// Stack layout immediately at entry
//
// [ optional padding(*) ] <--- SP (AArch64)
// [ parameter n         ] <--- Rparams (SP on 32-bit ARM)
//   ...
// [ parameter 1         ]
// [ expression stack    ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects.
//
// local variables follow incoming parameters immediately; i.e.
// the return address is saved at the end of the locals.
//
// [ reserved stack (*)  ] <--- SP (AArch64)
// [ expr. stack         ] <--- Rstack_top (SP on 32-bit ARM)
// [ monitor entry       ]
//   ...
// [ monitor entry       ]
// [ expr. stack bottom  ]
// [ saved Rbcp          ]
// [ current Rlocals     ]
// [ cache               ]
// [ mdx                 ]
// [ mirror              ]
// [ Method*             ]
//
// 32-bit ARM:
// [ last_sp             ]
//
// AArch64:
// [ extended SP (*)     ]
// [ stack top (*)       ]
//
// [ sender_sp           ]
// [ saved FP            ] <--- FP
// [ saved LR            ]
// [ optional padding(*) ]
// [ local variable m    ]
//   ...
// [ local variable 1    ]
// [ parameter n         ]
//   ...
// [ parameter 1         ] <--- Rlocals
//
// (*) - AArch64 only
//

address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  //    * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  //    * We may jump to the slow path iff the receiver is null. If the
  //      Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
  //      Thus we can use the regular method entry code to generate the NPE.
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
  // Rparams: parameters

  address entry = __ pc();
  Label slow_path;
  const Register Rthis = R0;
  const Register Rret_addr = Rtmp_save1;
  assert_different_registers(Rthis, Rret_addr, Rsender_sp);

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ ldr(Rthis, Address(Rparams));
  __ cbz(Rthis, slow_path);

  // Preserve LR
  __ mov(Rret_addr, LR);

  // Load the value of the referent field.
  const Address field_address(Rthis, referent_offset);
  __ load_heap_oop(R0, field_address, Rtemp, R1_tmp, R2_tmp, ON_WEAK_OOP_REF);

  // _areturn
  __ mov(SP, Rsender_sp);
  __ ret(Rret_addr);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}
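// The ON_WEAK_OOP_REF decorator above is what makes this intrinsic worth
// generating: it tells the BarrierSetAssembler to emit whatever extra work
// the active GC requires when reading Reference.referent (for G1, keeping
// the referent alive via its SATB machinery).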

// Not supported
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Incoming registers:
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp
  // Rparams: parameters

  address entry_point = __ pc();

  // Register allocation
  const Register Rsize_of_params = AARCH64_ONLY(R20) NOT_AARCH64(R6);
  const Register Rsig_handler    = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0 /* R4 */);
  const Register Rnative_code    = AARCH64_ONLY(R22) NOT_AARCH64(Rtmp_save1 /* R5 */);
  const Register Rresult_handler = AARCH64_ONLY(Rsig_handler) NOT_AARCH64(R6);

#ifdef AARCH64
  const Register RconstMethod = R10; // also used in generate_fixed_frame (should match)
  const Register Rsaved_result = Rnative_code;
  const FloatRegister Dsaved_result = V8;
#else
  const Register Rsaved_result_lo = Rtmp_save0;  // R4
  const Register Rsaved_result_hi = Rtmp_save1;  // R5
  FloatRegister saved_result_fp;
#endif // AARCH64


#ifdef AARCH64
  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
#else
  __ ldr(Rsize_of_params, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params, Address(Rsize_of_params, ConstMethod::size_of_parameters_offset()));
#endif // AARCH64

  // Native calls don't need the stack size check since they have no
  // expression stack: the arguments are already on the stack and we only add
  // a handful of words to it.

  // compute beginning of parameters (Rlocals)
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(Rsize_of_params, lsl, Interpreter::logStackElementSize));

#ifdef AARCH64
  int extra_stack_reserve = 2*wordSize; // extra space for oop_temp
  if (__ can_post_interpreter_events()) {
    // extra space for saved results
    extra_stack_reserve += 2*wordSize;
  }
  // reserve extra stack space and nullify oop_temp slot
  __ stp(ZR, ZR, Address(SP, -extra_stack_reserve, pre_indexed));
#else
  // reserve stack space for oop_temp
  __ mov(R0, 0);
  __ push(R0);
#endif // AARCH64

  generate_fixed_frame(true); // Note: R9 is now saved in the frame
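  // On 32-bit ARM configurations where the C ABI treats R9 as a scratch or
  // platform register (R9_IS_SCRATCHED), the method pointer kept in R9 must
  // be reloaded from the frame after every excursion into C code; that is
  // what the restore_method() calls below are for.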

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbnz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
      __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti/dtrace support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  {
    Label L;
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ cbnz(Rsig_handler, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1, true);
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  {
    Label L;
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ cbnz(Rnative_code, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1);
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // Allocate stack space for arguments

#ifdef AARCH64
  __ sub(Rtemp, SP, Rsize_of_params, ex_uxtw, LogBytesPerWord);
  __ align_reg(SP, Rtemp, StackAlignmentInBytes);

  // Allocate more stack space to accommodate all arguments passed on GP and FP registers:
  // 8 * wordSize for GPRs
  // 8 * wordSize for FPRs
  int reg_arguments = align_up(8*wordSize + 8*wordSize, StackAlignmentInBytes);
#else

  // C functions need aligned stack
  __ bic(SP, SP, StackAlignmentInBytes - 1);
  // Multiply by BytesPerLong instead of BytesPerWord, because calling convention
  // may require empty slots due to long alignment, e.g. func(int, jlong, int, jlong)
  __ sub(SP, SP, AsmOperand(Rsize_of_params, lsl, LogBytesPerLong));

#ifdef __ABI_HARD__
  // Allocate more stack space to accommodate all GP as well as FP registers:
  // 4 * wordSize
  // 8 * BytesPerLong
  int reg_arguments = align_up((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
#else
  // Reserve at least 4 words on the stack for loading
  // of parameters passed on registers (R0-R3).
  // See generate_slow_signature_handler().
  // It is also used for JNIEnv & class additional parameters.
  int reg_arguments = 4 * wordSize;
#endif // __ABI_HARD__
#endif // AARCH64

  __ sub(SP, SP, reg_arguments);


  // Note: the signature handler blows R4 (32-bit ARM) or R21 (AArch64) besides all scratch registers.
  // See AbstractInterpreterGenerator::generate_slow_signature_handler().
  __ call(Rsig_handler);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ mov(Rresult_handler, R0);

  // Pass JNIEnv and mirror for static methods
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ add(R0, Rthread, in_bytes(JavaThread::jni_environment_offset()));
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, L);
    __ load_mirror(Rtemp, Rmethod, Rtemp);
    __ add(R1, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ str(Rtemp, Address(R1, 0));
    __ bind(L);
  }
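  // At this point the outgoing frame matches the JNI calling convention:
  //   (*native)(JNIEnv* env, jobject receiver_or_jclass, ...)
  // R0 holds the JNIEnv*, and for static methods R1 is repointed at the
  // oop_temp slot holding the mirror, i.e. the class is passed as a handle.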

  __ set_last_Java_frame(SP, FP, true, Rtemp);

  // Changing state to _thread_in_native must be the last thing to do
  // before the jump to native code. At this moment stack must be
  // safepoint-safe and completely prepared for stack walking.
#ifdef ASSERT
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
    __ cmp_32(Rtemp, _thread_in_Java);
    __ b(L, eq);
    __ stop("invalid thread state");
    __ bind(L);
  }
#endif

#ifdef AARCH64
  __ mov(Rtemp, _thread_in_native);
  __ add(Rtemp2, Rthread, in_bytes(JavaThread::thread_state_offset()));
  // STLR is used to force all preceding writes to be observed prior to thread state change
  __ stlr_w(Rtemp, Rtemp2);
#else
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::StoreStore, Rtemp);

  __ mov(Rtemp, _thread_in_native);
  __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
#endif // AARCH64

  __ call(Rnative_code);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // Set FPSCR/FPCR to a known state
  if (AlwaysRestoreFPU) {
    __ restore_default_fp_mode();
  }

  // Do safepoint check
  __ mov(Rtemp, _thread_in_native_trans);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  __ membar(MacroAssembler::StoreLoad, Rtemp);

  __ ldr_global_s32(Rtemp, SafepointSynchronize::address_of_state());

  // Protect the return value in the interleaved code: save it to callee-save registers.
#ifdef AARCH64
  __ mov(Rsaved_result, R0);
  __ fmov_d(Dsaved_result, D0);
#else
  __ mov(Rsaved_result_lo, R0);
  __ mov(Rsaved_result_hi, R1);
#ifdef __ABI_HARD__
  // preserve native FP result in a callee-saved register
  saved_result_fp = D8;
  __ fcpyd(saved_result_fp, D0);
#else
  saved_result_fp = fnoreg;
#endif // __ABI_HARD__
#endif // AARCH64
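  // The block below evaluates, in effect:
  //   if (SafepointSynchronize::state != _not_synchronized
  //       || thread->suspend_flags != 0) {
  //     JavaThread::check_special_condition_for_native_trans(thread);
  //   }
  // (cond_cmp folds the two tests into one conditional call.)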

  {
    __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset()));
    __ cmp(Rtemp, SafepointSynchronize::_not_synchronized);
    __ cond_cmp(R3, 0, eq);

#ifdef AARCH64
    Label L;
    __ b(L, eq);
    __ mov(R0, Rthread);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none);
    __ bind(L);
#else
    __ mov(R0, Rthread, ne);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none, ne);
#if R9_IS_SCRATCHED
    __ restore_method();
#endif
#endif // AARCH64
  }

  // Perform Native->Java thread transition
  __ mov(Rtemp, _thread_in_Java);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Zero handles and last_java_sp
  __ reset_last_Java_frame(Rtemp);
  __ ldr(R3, Address(Rthread, JavaThread::active_handles_offset()));
  __ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset_in_bytes()));
  if (CheckJNICalls) {
    __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // Unbox oop result, e.g. JNIHandles::resolve result if it's an oop.
  {
    Label Lnot_oop;
#ifdef AARCH64
    __ mov_slow(Rtemp, AbstractInterpreter::result_handler(T_OBJECT));
    __ cmp(Rresult_handler, Rtemp);
    __ b(Lnot_oop, ne);
#else // !AARCH64
    // For ARM32, Rresult_handler is -1 for oop result, 0 otherwise.
    __ cbz(Rresult_handler, Lnot_oop);
#endif // !AARCH64
    Register value = AARCH64_ONLY(Rsaved_result) NOT_AARCH64(Rsaved_result_lo);
    __ resolve_jobject(value,   // value
                       Rtemp,   // tmp1
                       R1_tmp); // tmp2
    // Store resolved result in frame for GC visibility.
    __ str(value, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(Lnot_oop);
  }

#ifdef AARCH64
  // Restore SP (drop native parameters area), to keep SP in sync with extended_sp in frame
  __ restore_sp_after_call(Rtemp);
  __ check_stack_top();
#endif // AARCH64
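  // The reguard step below is, in effect:
  //   if (thread->stack_guard_state == stack_guard_yellow_reserved_disabled)
  //     SharedRuntime::reguard_yellow_pages();
  // i.e. re-arm the yellow zone if it was disabled by a stack overflow
  // while in native code.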

  // reguard stack if StackOverflow exception happened while in native.
  {
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset()));
    __ cmp_32(Rtemp, JavaThread::stack_guard_yellow_reserved_disabled);
#ifdef AARCH64
    Label L;
    __ b(L, ne);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none);
    __ bind(L);
#else
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq);
#if R9_IS_SCRATCHED
    __ restore_method();
#endif
#endif // AARCH64
  }

  // check pending exceptions
  {
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
#ifdef AARCH64
    Label L;
    __ cbz(Rtemp, L);
    __ mov_pc_to(Rexception_pc);
    __ b(StubRoutines::forward_exception_entry());
    __ bind(L);
#else
    __ cmp(Rtemp, 0);
    __ mov(Rexception_pc, PC, ne);
    __ b(StubRoutines::forward_exception_entry(), ne);
#endif // AARCH64
  }

  if (synchronized) {
    // address of first monitor
    __ sub(R1, FP, - (frame::interpreter_frame_monitor_block_bottom_offset - frame::interpreter_frame_monitor_size()) * wordSize);
    __ unlock_object(R1);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
#ifdef AARCH64
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result, noreg, Dsaved_result);
#else
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result_lo, Rsaved_result_hi, saved_result_fp);
#endif // AARCH64
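  // An oop result cannot stay in a register across the VM transitions above
  // (unlocking, JVMTI notification): a GC triggered there may move the
  // object. That is why it was stashed in the oop_temp frame slot (a GC
  // root) and is reloaded from there below.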

  // Restore the result. Oop result is restored from the stack.
#ifdef AARCH64
  __ mov(R0, Rsaved_result);
  __ fmov_d(D0, Dsaved_result);

  __ blr(Rresult_handler);
#else
  __ cmp(Rresult_handler, 0);
  __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize), ne);
  __ mov(R0, Rsaved_result_lo, eq);
  __ mov(R1, Rsaved_result_hi);

#ifdef __ABI_HARD__
  // reload native FP result
  __ fcpyd(D0, D8);
#endif // __ABI_HARD__

#ifdef ASSERT
  if (VerifyOops) {
    Label L;
    __ cmp(Rresult_handler, 0);
    __ b(L, eq);
    __ verify_oop(R0);
    __ bind(L);
  }
#endif // ASSERT
#endif // AARCH64

  // Restore FP/LR, sender_sp and return
#ifdef AARCH64
  __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
  __ ldp(FP, LR, Address(FP));
  __ mov(SP, Rtemp);
#else
  __ mov(Rtemp, FP);
  __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
  __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
#endif // AARCH64

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
  // Rparams: pointer to the last parameter in the stack

  address entry_point = __ pc();

  const Register RconstMethod = AARCH64_ONLY(R10) NOT_AARCH64(R3);

#ifdef AARCH64
  const Register RmaxStack = R11;
  const Register RlocalsBase = R12;
#endif // AARCH64

  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));

  __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
  __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));

  // setup Rlocals
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize));

  __ sub(R3, R3, R2); // number of additional locals
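  // Rlocals computation above, in effect:
  //   Rlocals = Rparams + (size_of_parameters - 1) * wordSize;
  // so Rlocals points at parameter 1, matching the layout pictured before
  // generate_Reference_get_entry.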

#ifdef AARCH64
  // setup RmaxStack
  __ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
  // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
  // none of which are at the same time, so we just need to make sure there is enough room
  // for the biggest user:
  //   -reserved slot for exception handler
  //   -reserved slots for JSR292. Method::extra_stack_entries() is the size.
  //   -3 reserved slots so get_method_counters() can save some registers before call_VM().
  __ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries()));
#endif // AARCH64

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

#ifdef AARCH64
  // allocate space for locals
  {
    __ sub(RlocalsBase, Rparams, AsmOperand(R3, lsl, Interpreter::logStackElementSize));
    __ align_reg(SP, RlocalsBase, StackAlignmentInBytes);
  }

  // explicitly initialize locals
  {
    Label zero_loop, done;
    __ cbz(R3, done);

    __ tbz(R3, 0, zero_loop);
    __ subs(R3, R3, 1);
    __ str(ZR, Address(RlocalsBase, wordSize, post_indexed));
    __ b(done, eq);

    __ bind(zero_loop);
    __ subs(R3, R3, 2);
    __ stp(ZR, ZR, Address(RlocalsBase, 2*wordSize, post_indexed));
    __ b(zero_loop, ne);

    __ bind(done);
  }

#else
  // allocate space for locals
  // explicitly initialize locals

  // Loop is unrolled 4 times
  Label loop;
  __ mov(R0, 0);
  __ bind(loop);

  // #1
  __ subs(R3, R3, 1);
  __ push(R0, ge);

  // #2
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #3
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #4
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  __ b(loop, gt);
#endif // AARCH64
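  // Both variants above implement the same thing:
  //   while (additional_locals-- > 0) push(0);
  // AArch64 zeroes pairwise with stp after peeling off an odd slot; 32-bit
  // ARM unrolls the conditional push four times per loop iteration.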

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  __ restore_dispatch();

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
      __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();

      __ b(profile_method_continue);
    }

    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Rexception_obj: exception

#ifndef AARCH64
  // Clear interpreter_frame_last_sp.
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // !AARCH64

#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
#endif // AARCH64

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();

  // expression stack is undefined here
  // Rexception_obj: exception
  // Rbcp: exception bcp
  __ verify_oop(Rexception_obj);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ mov(R1, Rexception_obj);
  __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), R1);
  // R0: exception handler entry point
  // Rexception_obj: preserved exception oop
  // Rbcp: bcp for exception handler
  __ push_ptr(Rexception_obj);  // push exception which is now the only value on the stack
  __ jump(R0);                  // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is removed and
  // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // Rbcp: exception bcp

  //
  // JVMTI PopFrame support
  //
  Interpreter::_remove_activation_preserving_args_entry = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp); // restore SP to extended SP
#endif // AARCH64

  __ empty_expression_stack();

  // Set the popframe_processing bit in _popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.

  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
  __ orr(Rtemp, Rtemp, (unsigned)JavaThread::popframe_processing_bit);
  __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ldr(R0, Address(FP, frame::return_addr_offset * wordSize));
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), R0);
    __ cbnz_32(R0, caller_not_deoptimized);
#ifdef AARCH64
    __ NOT_TESTED();
#endif

    // Compute size of arguments for saving when returning to deoptimized caller
    __ restore_method();
    __ ldr(R0, Address(Rmethod, Method::const_offset()));
    __ ldrh(R0, Address(R0, ConstMethod::size_of_parameters_offset()));

    __ logical_shift_left(R1, R0, Interpreter::logStackElementSize);
    // Save these arguments
    __ restore_locals();
    __ sub(R2, Rlocals, R1);
    __ add(R2, R2, wordSize);
    __ mov(R0, Rthread);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R0, R1, R2);

    __ remove_activation(vtos, LR,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring these arguments
    __ mov(Rtemp, JavaThread::popframe_force_deopt_reexecution_bit);
    __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret();

    __ bind(caller_not_deoptimized);
  }
#ifndef AARCH64
  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(R1, SP);
  __ ldr(R2, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(SP, FP, true, Rtemp);
  __ mov(R0, Rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), R0, R1, R2);
  __ reset_last_Java_frame(Rtemp);
#endif // !AARCH64

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
  __ restore_stack_top();
#else
  // Restore the last_sp and null it out
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();
  __ restore_method();

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  assert(JavaThread::popframe_inactive == 0, "adjust this code");
  __ str_32(__ zero_register(Rtemp), Address(Rthread, JavaThread::popframe_condition_offset()));
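  // Conceptual view (illustrative C-level sketch, not generated code) of the
  // popframe flag protocol on JavaThread used by this entry:
  //
  //   _popframe_condition |= popframe_processing_bit;   // set on entry above
  //   ...                                               // pop the frame
  //   _popframe_condition = popframe_inactive;          // cleared just above
  //
  // For a deoptimized caller, popframe_force_deopt_reexecution_bit is set
  // instead, and the deoptimization handler clears it after restoring the
  // preserved arguments.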
#if INCLUDE_JVMTI
  {
    Label L_done;

    __ ldrb(Rtemp, Address(Rbcp, 0));
    __ cmp(Rtemp, Bytecodes::_invokestatic);
    __ b(L_done, ne);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    // get local0
    __ ldr(R1, Address(Rlocals, 0));
    __ mov(R2, Rmethod);
    __ mov(R3, Rbcp);
    __ call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R1, R2, R3);

    __ cbz(R0, L_done);

    __ str(R0, Address(Rstack_top));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(R0_tos);
  __ str(R0_tos, Address(Rthread, JavaThread::vm_result_offset()));
  // remove the activation (without throwing IllegalMonitorStateException)
  __ remove_activation(vtos, Rexception_pc, false, true, false);
  // restore exception
  __ get_vm_result(Rexception_obj, Rtemp);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects
  // the following registers set up:
  //
  // Rexception_obj: exception
  // Rexception_pc: return address/pc that threw exception
  // SP: expression stack of caller
  // FP: frame pointer of caller
  __ mov(c_rarg0, Rthread);
  __ mov(c_rarg1, Rexception_pc);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);
  // Note that an "issuing PC" is actually the next PC after the call

  __ jump(R0); // jump to exception handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp); // restore SP to extended SP
#endif // AARCH64

  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  __ empty_expression_stack();

  __ load_earlyret_value(state);

  // Clear the earlyret state
  __ ldr(Rtemp, Address(Rthread, JavaThread::jvmti_thread_state_offset()));

  assert(JvmtiThreadState::earlyret_inactive == 0, "adjust this code");
  __ str_32(__ zero_register(R2), Address(Rtemp, JvmtiThreadState::earlyret_state_offset()));

  __ remove_activation(state, LR,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
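  // Illustrative mapping (not generated code) for the return-value conversion
  // performed below on 32-bit ARM, where the interpreter calling convention
  // returns results in core registers:
  //
  //   ftos: R0    <- S0   (32-bit float bits)
  //   dtos: R0:R1 <- D0   (64-bit double bits)
  //
  // Other tos states already keep their result in R0 (and R1 for ltos).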
#ifndef AARCH64
  // According to the interpreter calling conventions, the result is returned in R0/R1,
  // so ftos (S0) and dtos (D0) are moved to R0/R1.
  // This conversion should be done after remove_activation, as remove_activation
  // uses push(state) & pop(state) to preserve the return value.
  __ convert_tos_to_retval(state);
#endif // !AARCH64

  __ ret();

  return entry;
} // end of ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;

#ifdef __SOFTFP__
  dep = __ pc();                // fall through
#else
  fep = __ pc(); __ push(ftos); __ b(L);
  dep = __ pc(); __ push(dtos); __ b(L);
#endif // __SOFTFP__

  lep = __ pc(); __ push(ltos); __ b(L);

  if (AARCH64_ONLY(true) NOT_AARCH64(VerifyOops)) {
    // can't share atos entry with itos on AArch64 or if VerifyOops
    aep = __ pc(); __ push(atos); __ b(L);
  } else {
    aep = __ pc();              // fall through
  }

#ifdef __SOFTFP__
  fep = __ pc();                // fall through
#endif // __SOFTFP__

  bep = cep = sep =             // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

//------------------------------------------------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ push(state);       // save tosca

  // pass tosca registers as arguments
  __ mov(R2, R0_tos);
#ifdef AARCH64
  __ mov(R3, ZR);
#else
  __ mov(R3, R1_tos_hi);
#endif // AARCH64
  __ mov(R1, LR);       // save return address

  // call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);

  __ mov(LR, R0);       // restore return address
  __ pop(state);        // restore tosca

  // return
  __ ret();

  return entry;
}


void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_global_counter((address) &BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
}
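// Worked example (illustrative) for the pair-index computation in
// histogram_bytecode_pair() below, assuming
// BytecodePairHistogram::log2_number_of_codes == 8: with a previous index of
// 0xAABB and a current bytecode of 0xCC, the new index is
//   (0xAABB >> 8) | (0xCC << 8) == 0xCCAA,
// i.e. the low byte holds the previous bytecode and the high byte the
// current one, giving one counter slot per ordered bytecode pair.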
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register Rindex_addr = R2_tmp;
  Label Lcontinue;
  InlinedAddress Lcounters((address) BytecodePairHistogram::_counters);
  InlinedAddress Lindex((address) &BytecodePairHistogram::_index);
  const Register Rcounters_addr = R2_tmp;
  const Register Rindex = R4_tmp;

  // calculate new index for counter:
  //   index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes),
  //   where (_index >> log2_number_of_codes) is the previous bytecode

  __ ldr_literal(Rindex_addr, Lindex);
  __ ldr_s32(Rindex, Address(Rindex_addr));
  __ mov_slow(Rtemp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
  __ str_32(Rindex, Address(Rindex_addr));

  // Rindex (R4) contains the index of the counter

  __ ldr_literal(Rcounters_addr, Lcounters);
  __ ldr_s32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
  __ adds_32(Rtemp, Rtemp, 1);
  __ b(Lcontinue, mi);                              // skip the store on overflow
  __ str_32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));

  __ b(Lcontinue);

  __ bind_literal(Lindex);
  __ bind_literal(Lcounters);

  __ bind(Lcontinue);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid code blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  address trace_entry = Interpreter::trace_code(t->tos_in());
  __ call(trace_entry, relocInfo::none);
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label Lcontinue;
  const Register stop_at = R2_tmp;

  __ ldr_global_s32(Rtemp, (address) &BytecodeCounter::_counter_value);
  __ mov_slow(stop_at, StopInterpreterAt);

  // test bytecode counter
  __ cmp(Rtemp, stop_at);
  __ b(Lcontinue, ne);

  __ trace_state("stop_interpreter_at");
  __ breakpoint();

  __ bind(Lcontinue);
}
#endif // !PRODUCT
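// Usage note (illustrative): stop_interpreter_at() above is driven by the
// debug-build flag -XX:StopInterpreterAt=<n>; once the global bytecode
// counter maintained by count_bytecode() reaches <n>, the generated check
// triggers a breakpoint so the interpreter state can be examined in a
// native debugger.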