1 /*
2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "interpreter/bytecodeHistogram.hpp"
29 #include "interpreter/interp_masm.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "interpreter/interpreterRuntime.hpp"
32 #include "interpreter/templateInterpreterGenerator.hpp"
33 #include "interpreter/templateTable.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/methodData.hpp"
36 #include "oops/method.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "prims/jvmtiExport.hpp"
39 #include "prims/jvmtiThreadState.hpp"
40 #include "prims/methodHandles.hpp"
41 #include "runtime/arguments.hpp"
42 #include "runtime/deoptimization.hpp"
43 #include "runtime/frame.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "runtime/stubRoutines.hpp"
46 #include "runtime/synchronizer.hpp"
47 #include "runtime/timer.hpp"
48 #include "runtime/vframeArray.hpp"
49 #include "utilities/align.hpp"
50 #include "utilities/debug.hpp"
51 #include "utilities/macros.hpp"
52
53 // Size of interpreter code. Increase if too small. The interpreter will
54 // fail with a guarantee ("not enough space for interpreter generation")
55 // if it is too small.
56 // Run with +PrintInterpreter to get the VM to print out the size.
57 // Max size with JVMTI
58 int TemplateInterpreter::InterpreterCodeSize = 180 * 1024;
59
60 #define __ _masm->
61
62 //------------------------------------------------------------------------------------------------------------------------
63
64 address TemplateInterpreterGenerator::generate_slow_signature_handler() {
65   address entry = __ pc();
66
67   // callee-save register for saving LR, shared with generate_native_entry
68   const Register Rsaved_ret_addr = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0);
69
70   __ mov(Rsaved_ret_addr, LR);
71
72   __ mov(R1, Rmethod);
73   __ mov(R2, Rlocals);
74   __ mov(R3, SP);
75
76 #ifdef AARCH64
77   // expand expr. stack and extended SP to avoid cutting SP in call_VM
78   __ mov(Rstack_top, SP);
79   __ str(Rstack_top, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
80   __ check_stack_top();
81
82   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), R1, R2, R3, false);
83
84   __ ldp(ZR, c_rarg1, Address(SP, 2*wordSize, post_indexed));
85   __ ldp(c_rarg2, c_rarg3, Address(SP, 2*wordSize, post_indexed));
86   __ ldp(c_rarg4, c_rarg5, Address(SP, 2*wordSize, post_indexed));
87   __ ldp(c_rarg6, c_rarg7, Address(SP, 2*wordSize, post_indexed));
88
89   __ ldp_d(V0, V1, Address(SP, 2*wordSize, post_indexed));
90   __ ldp_d(V2, V3, Address(SP, 2*wordSize, post_indexed));
91   __ ldp_d(V4, V5, Address(SP, 2*wordSize, post_indexed));
92   __ ldp_d(V6, V7, Address(SP, 2*wordSize, post_indexed));
93 #else
94
95   // Safer to save R9 (when scratched) since callers may have been
96   // written assuming R9 survives. This is suboptimal but
97   // probably not important for this slow case call site.
98   // Note for R9 saving: slow_signature_handler may copy register
99   // arguments above the current SP (passed as R3). It is safe for
100   // call_VM to use push and pop to protect additional values on the
101   // stack if needed.
102   __ call_VM(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), true /* save R9 if needed*/);
103   __ add(SP, SP, wordSize);     // Skip R0
104   __ pop(RegisterSet(R1, R3));  // Load arguments passed in registers
105 #ifdef __ABI_HARD__
106   // A few alternatives to an always-load-FP-registers approach:
107   // - parse the method signature to detect FP arguments
108   // - keep a counter/flag on the stack indicating the number of FP arguments in the method.
109   // The latter was originally implemented and tested, but a conditional path could
110   // eliminate any gain from avoiding the 8 double-word loads.
111   __ fldmiad(SP, FloatRegisterSet(D0, 8), writeback);
112 #endif // __ABI_HARD__
113 #endif // AARCH64
114
115   __ ret(Rsaved_ret_addr);
116
117   return entry;
118 }
119
120
121 //
122 // Various method entries (that c++ and asm interpreter agree upon)
123 //------------------------------------------------------------------------------------------------------------------------
124 //
125 //
126
127 // Abstract method entry
128 // Attempt to execute abstract method. Throw exception
129 address TemplateInterpreterGenerator::generate_abstract_entry(void) {
130   address entry_point = __ pc();
131
132 #ifdef AARCH64
133   __ restore_sp_after_call(Rtemp);
134   __ restore_stack_top();
135 #endif
136
137   __ empty_expression_stack();
138
139   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
140
141   DEBUG_ONLY(STOP("generate_abstract_entry");)  // Should not reach here
142   return entry_point;
143 }
144
145 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
146   if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
147
148   // TODO: ARM
149   return NULL;
150
151   address entry_point = __ pc();
152   STOP("generate_math_entry");
153   return entry_point;
154 }
155
156 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
157   address entry = __ pc();
158
159   // Note: There should be a minimal interpreter frame set up when stack
160   // overflow occurs since we check explicitly for it now.
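  // (Illustrative sketch, assumption - not generated code.) With a stack that
  // grows downwards, the ASSERT block below effectively checks
  //   SP <= FP + frame::interpreter_frame_monitor_block_top_offset * wordSize
  // i.e. that the fixed part of an interpreter frame has already been pushed.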
161 // 162 #ifdef ASSERT 163 { Label L; 164 __ sub(Rtemp, FP, - frame::interpreter_frame_monitor_block_top_offset * wordSize); 165 __ cmp(SP, Rtemp); // Rtemp = maximal SP for current FP, 166 // (stack grows negative) 167 __ b(L, ls); // check if frame is complete 168 __ stop ("interpreter frame not set up"); 169 __ bind(L); 170 } 171 #endif // ASSERT 172 173 // Restore bcp under the assumption that the current frame is still 174 // interpreted 175 __ restore_bcp(); 176 177 // expression stack must be empty before entering the VM if an exception 178 // happened 179 __ empty_expression_stack(); 180 181 // throw exception 182 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError)); 183 184 __ should_not_reach_here(); 185 186 return entry; 187 } 188 189 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() { 190 address entry = __ pc(); 191 192 // index is in R4_ArrayIndexOutOfBounds_index 193 194 // expression stack must be empty before entering the VM if an exception happened 195 __ empty_expression_stack(); 196 197 // setup parameters 198 // Array expected in R1. 199 __ mov(R2, R4_ArrayIndexOutOfBounds_index); 200 201 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2); 202 203 __ nop(); // to avoid filling CPU pipeline with invalid instructions 204 __ nop(); 205 __ should_not_reach_here(); 206 207 return entry; 208 } 209 210 address TemplateInterpreterGenerator::generate_ClassCastException_handler() { 211 address entry = __ pc(); 212 213 // object is in R2_ClassCastException_obj 214 215 // expression stack must be empty before entering the VM if an exception 216 // happened 217 __ empty_expression_stack(); 218 219 __ mov(R1, R2_ClassCastException_obj); 220 __ call_VM(noreg, 221 CAST_FROM_FN_PTR(address, 222 InterpreterRuntime::throw_ClassCastException), 223 R1); 224 225 __ should_not_reach_here(); 226 227 return entry; 228 } 229 230 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) { 231 assert(!pass_oop || message == NULL, "either oop or message but not both"); 232 address entry = __ pc(); 233 234 InlinedString Lname(name); 235 InlinedString Lmessage(message); 236 237 if (pass_oop) { 238 // object is at TOS 239 __ pop_ptr(R2); 240 } 241 242 // expression stack must be empty before entering the VM if an exception happened 243 __ empty_expression_stack(); 244 245 // setup parameters 246 __ ldr_literal(R1, Lname); 247 248 if (pass_oop) { 249 __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), R1, R2); 250 } else { 251 if (message != NULL) { 252 __ ldr_literal(R2, Lmessage); 253 } else { 254 __ mov(R2, 0); 255 } 256 __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), R1, R2); 257 } 258 259 // throw exception 260 __ b(Interpreter::throw_exception_entry()); 261 262 __ nop(); // to avoid filling CPU pipeline with invalid instructions 263 __ nop(); 264 __ bind_literal(Lname); 265 if (!pass_oop && (message != NULL)) { 266 __ bind_literal(Lmessage); 267 } 268 269 return entry; 270 } 271 272 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { 273 address entry = __ pc(); 274 275 __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__); 276 277 #ifdef AARCH64 278 __ restore_sp_after_call(Rtemp); // Restore SP to extended SP 279 __ restore_stack_top(); 280 #else 281 
// Restore stack bottom in case i2c adjusted stack
282   __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
283   // and NULL it as a marker that SP is now tos until the next Java call
284   __ mov(Rtemp, (int)NULL_WORD);
285   __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
286 #endif // AARCH64
287
288   __ restore_method();
289   __ restore_bcp();
290   __ restore_dispatch();
291   __ restore_locals();
292
293   const Register Rcache = R2_tmp;
294   const Register Rindex = R3_tmp;
295   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
296
297   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
298   __ ldrb(Rtemp, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
299   __ check_stack_top();
300   __ add(Rstack_top, Rstack_top, AsmOperand(Rtemp, lsl, Interpreter::logStackElementSize));
301
302 #ifndef AARCH64
303   __ convert_retval_to_tos(state);
304 #endif // !AARCH64
305
306   __ check_and_handle_popframe();
307   __ check_and_handle_earlyret();
308
309   __ dispatch_next(state, step);
310
311   return entry;
312 }
313
314
315 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
316   address entry = __ pc();
317
318   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
319
320 #ifdef AARCH64
321   __ restore_sp_after_call(Rtemp);  // Restore SP to extended SP
322   __ restore_stack_top();
323 #else
324   // The stack is not extended by deopt but we must NULL last_sp as this
325   // entry is like a "return".
326   __ mov(Rtemp, 0);
327   __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
328 #endif // AARCH64
329
330   __ restore_method();
331   __ restore_bcp();
332   __ restore_dispatch();
333   __ restore_locals();
334
335   // handle exceptions
336   { Label L;
337     __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
338     __ cbz(Rtemp, L);
339     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
340     __ should_not_reach_here();
341     __ bind(L);
342   }
343
344   if (continuation == NULL) {
345     __ dispatch_next(state, step);
346   } else {
347     __ jump_to_entry(continuation);
348   }
349
350   return entry;
351 }
352
353 address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
354 #ifdef AARCH64
355   address entry = __ pc();
356   switch (type) {
357     case T_BOOLEAN:
358       __ tst(R0, 0xff);
359       __ cset(R0, ne);
360       break;
361     case T_CHAR   : __ zero_extend(R0, R0, 16);  break;
362     case T_BYTE   : __ sign_extend(R0, R0, 8);   break;
363     case T_SHORT  : __ sign_extend(R0, R0, 16);  break;
364     case T_INT    : // fall through
365     case T_LONG   : // fall through
366     case T_VOID   : // fall through
367     case T_FLOAT  : // fall through
368     case T_DOUBLE : /* nothing to do */          break;
369     case T_OBJECT :
370       // retrieve result from frame
371       __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
372       // and verify it
373       __ verify_oop(R0);
374       break;
375     default       : ShouldNotReachHere();
376   }
377   __ ret();
378   return entry;
379 #else
380   // Result handlers are not used on 32-bit ARM
381   // since the returned value is already in appropriate format.
382   __ should_not_reach_here();  // to avoid empty code block
383
384   // A non-zero result handler indicates that an object is returned; this is
385   // used in the native entry code.
386   return type == T_OBJECT ? (address)(-1) : NULL;
387 #endif // AARCH64
388 }
389
390 address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
391   address entry = __ pc();
392   __ push(state);
393   __ call_VM(noreg, runtime_entry);
394
395   // load current bytecode
396   __ ldrb(R3_bytecode, Address(Rbcp));
397   __ dispatch_only_normal(vtos);
398   return entry;
399 }
400
401
402 // Helpers for commoning out cases in the various types of method entries.
403 //
404
405 // increment invocation count & check for overflow
406 //
407 // Note: checking for negative value instead of overflow
408 //       so we have a 'sticky' overflow test
409 //
410 // In: Rmethod.
411 //
412 // Uses R0, R1, Rtemp.
413 //
414 void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow,
415                                                          Label* profile_method,
416                                                          Label* profile_method_continue) {
417   Label done;
418   const Register Rcounters = Rtemp;
419   const Address invocation_counter(Rcounters,
420                                    MethodCounters::invocation_counter_offset() +
421                                    InvocationCounter::counter_offset());
422
423   // Note: In tiered we increment either counters in MethodCounters* or
424   // in MDO depending if we're profiling or not.
425   if (TieredCompilation) {
426     int increment = InvocationCounter::count_increment;
427     Label no_mdo;
428     if (ProfileInterpreter) {
429       // Are we profiling?
430       __ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset()));
431       __ cbz(R1_tmp, no_mdo);
432       // Increment counter in the MDO
433       const Address mdo_invocation_counter(R1_tmp,
434                     in_bytes(MethodData::invocation_counter_offset()) +
435                     in_bytes(InvocationCounter::counter_offset()));
436       const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset()));
437       __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow);
438       __ b(done);
439     }
440     __ bind(no_mdo);
441     __ get_method_counters(Rmethod, Rcounters, done);
442     const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset()));
443     __ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow);
444     __ bind(done);
445   } else { // not TieredCompilation
446     const Address backedge_counter(Rcounters,
447                   MethodCounters::backedge_counter_offset() +
448                   InvocationCounter::counter_offset());
449
450     const Register Ricnt = R0_tmp;  // invocation counter
451     const Register Rbcnt = R1_tmp;  // backedge counter
452
453     __ get_method_counters(Rmethod, Rcounters, done);
454
455     if (ProfileInterpreter) {
456       const Register Riic = R1_tmp;
457       __ ldr_s32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
458       __ add(Riic, Riic, 1);
459       __ str_32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
460     }
461
462     // Update standard invocation counters
463
464     __ ldr_u32(Ricnt, invocation_counter);
465     __ ldr_u32(Rbcnt, backedge_counter);
466
467     __ add(Ricnt, Ricnt, InvocationCounter::count_increment);
468
469 #ifdef AARCH64
470     __ andr(Rbcnt, Rbcnt, (unsigned int)InvocationCounter::count_mask_value); // mask out the status bits
471 #else
472     __ bic(Rbcnt, Rbcnt, ~InvocationCounter::count_mask_value); // mask out the status bits
473 #endif // AARCH64
474
475     __ str_32(Ricnt, invocation_counter);  // save invocation count
476     __ add(Ricnt, Ricnt, Rbcnt);           // add both counters
477
478     // profile_method is non-null only for interpreted methods, so
479     // profile_method != NULL == !native_call
480     // BytecodeInterpreter only calls for native so code is elided.
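    // (Illustrative sketch, assumption - not generated code.) In C-like terms
    // the limit checks emitted below amount to, with count = invocation +
    // backedge counters:
    //   if (ProfileInterpreter && count >= interpreter_profile_limit
    //       && mdo == NULL) goto profile_method;     // create an MDO
    //   if (count >= interpreter_invocation_limit) goto overflow;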
481 482 if (ProfileInterpreter && profile_method != NULL) { 483 assert(profile_method_continue != NULL, "should be non-null"); 484 485 // Test to see if we should create a method data oop 486 // Reuse R1_tmp as we don't need backedge counters anymore. 487 Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset())); 488 __ ldr_s32(R1_tmp, profile_limit); 489 __ cmp_32(Ricnt, R1_tmp); 490 __ b(*profile_method_continue, lt); 491 492 // if no method data exists, go to profile_method 493 __ test_method_data_pointer(R1_tmp, *profile_method); 494 } 495 496 Address invoke_limit(Rcounters, in_bytes(MethodCounters::interpreter_invocation_limit_offset())); 497 __ ldr_s32(R1_tmp, invoke_limit); 498 __ cmp_32(Ricnt, R1_tmp); 499 __ b(*overflow, hs); 500 __ bind(done); 501 } 502 } 503 504 void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) { 505 // InterpreterRuntime::frequency_counter_overflow takes one argument 506 // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp). 507 // The call returns the address of the verified entry point for the method or NULL 508 // if the compilation did not complete (either went background or bailed out). 509 __ mov(R1, (int)false); 510 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1); 511 512 // jump to the interpreted entry. 513 __ b(do_continue); 514 } 515 516 void TemplateInterpreterGenerator::generate_stack_overflow_check(void) { 517 // Check if we've got enough room on the stack for 518 // - overhead; 519 // - locals; 520 // - expression stack. 521 // 522 // Registers on entry: 523 // 524 // R3 = number of additional locals 525 // R11 = max expression stack slots (AArch64 only) 526 // Rthread 527 // Rmethod 528 // Registers used: R0, R1, R2, Rtemp. 529 530 const Register Radditional_locals = R3; 531 const Register RmaxStack = AARCH64_ONLY(R11) NOT_AARCH64(R2); 532 533 // monitor entry size 534 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 535 536 // total overhead size: entry_size + (saved registers, thru expr stack bottom). 537 // be sure to change this if you add/subtract anything to/from the overhead area 538 const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size; 539 540 // Pages reserved for VM runtime calls and subsequent Java calls. 541 const int reserved_pages = JavaThread::stack_shadow_zone_size(); 542 543 // Thread::stack_size() includes guard pages, and they should not be touched. 
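  // (Illustrative sketch, assumption - not generated code.) The comparison
  // computed below is roughly:
  //   lowest_needed = SP - (overhead_size + reserved_pages + guard_pages
  //                         + extra_stack_words + locals_size + max_stack_size);
  //   if (lowest_needed <= stack_base - stack_size)   // would enter guard area
  //     goto StubRoutines::throw_StackOverflowError_entry();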
544 const int guard_pages = JavaThread::stack_guard_zone_size(); 545 546 __ ldr(R0, Address(Rthread, Thread::stack_base_offset())); 547 __ ldr(R1, Address(Rthread, Thread::stack_size_offset())); 548 #ifndef AARCH64 549 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); 550 __ ldrh(RmaxStack, Address(Rtemp, ConstMethod::max_stack_offset())); 551 #endif // !AARCH64 552 __ sub_slow(Rtemp, SP, overhead_size + reserved_pages + guard_pages + Method::extra_stack_words()); 553 554 // reserve space for additional locals 555 __ sub(Rtemp, Rtemp, AsmOperand(Radditional_locals, lsl, Interpreter::logStackElementSize)); 556 557 // stack size 558 __ sub(R0, R0, R1); 559 560 // reserve space for expression stack 561 __ sub(Rtemp, Rtemp, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize)); 562 563 __ cmp(Rtemp, R0); 564 565 #ifdef AARCH64 566 Label L; 567 __ b(L, hi); 568 __ mov(SP, Rsender_sp); // restore SP 569 __ b(StubRoutines::throw_StackOverflowError_entry()); 570 __ bind(L); 571 #else 572 __ mov(SP, Rsender_sp, ls); // restore SP 573 __ b(StubRoutines::throw_StackOverflowError_entry(), ls); 574 #endif // AARCH64 575 } 576 577 578 // Allocate monitor and lock method (asm interpreter) 579 // 580 void TemplateInterpreterGenerator::lock_method() { 581 // synchronize method 582 583 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 584 assert ((entry_size % StackAlignmentInBytes) == 0, "should keep stack alignment"); 585 586 #ifdef ASSERT 587 { Label L; 588 __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset())); 589 __ tbnz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L); 590 __ stop("method doesn't need synchronization"); 591 __ bind(L); 592 } 593 #endif // ASSERT 594 595 // get synchronization object 596 { Label done; 597 __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset())); 598 #ifdef AARCH64 599 __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case) 600 __ tbz(Rtemp, JVM_ACC_STATIC_BIT, done); 601 #else 602 __ tst(Rtemp, JVM_ACC_STATIC); 603 __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0)), eq); // get receiver (assume this is frequent case) 604 __ b(done, eq); 605 #endif // AARCH64 606 __ load_mirror(R0, Rmethod, Rtemp); 607 __ bind(done); 608 } 609 610 // add space for monitor & lock 611 612 #ifdef AARCH64 613 __ check_extended_sp(Rtemp); 614 __ sub(SP, SP, entry_size); // adjust extended SP 615 __ mov(Rtemp, SP); 616 __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize)); 617 #endif // AARCH64 618 619 __ sub(Rstack_top, Rstack_top, entry_size); 620 __ check_stack_top_on_expansion(); 621 // add space for a monitor entry 622 __ str(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 623 // set new monitor block top 624 __ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset_in_bytes())); 625 // store object 626 __ mov(R1, Rstack_top); // monitor entry address 627 __ lock_object(R1); 628 } 629 630 #ifdef AARCH64 631 632 // 633 // Generate a fixed interpreter frame. This is identical setup for interpreted methods 634 // and for native methods hence the shared code. 635 // 636 // On entry: 637 // R10 = ConstMethod 638 // R11 = max expr. 
stack (in slots), if !native_call 639 // 640 // On exit: 641 // Rbcp, Rstack_top are initialized, SP is extended 642 // 643 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { 644 // Incoming registers 645 const Register RconstMethod = R10; 646 const Register RmaxStack = R11; 647 // Temporary registers 648 const Register RextendedSP = R0; 649 const Register Rcache = R1; 650 const Register Rmdp = ProfileInterpreter ? R2 : ZR; 651 652 // Generates the following stack layout (stack grows up in this picture): 653 // 654 // [ expr. stack bottom ] 655 // [ saved Rbcp ] 656 // [ current Rlocals ] 657 // [ cache ] 658 // [ mdx ] 659 // [ mirror ] 660 // [ Method* ] 661 // [ extended SP ] 662 // [ expr. stack top ] 663 // [ sender_sp ] 664 // [ saved FP ] <--- FP 665 // [ saved LR ] 666 667 // initialize fixed part of activation frame 668 __ stp(FP, LR, Address(SP, -2*wordSize, pre_indexed)); 669 __ mov(FP, SP); // establish new FP 670 671 // setup Rbcp 672 if (native_call) { 673 __ mov(Rbcp, ZR); // bcp = 0 for native calls 674 } else { 675 __ add(Rbcp, RconstMethod, in_bytes(ConstMethod::codes_offset())); // get codebase 676 } 677 678 // Rstack_top & RextendedSP 679 __ sub(Rstack_top, SP, 10*wordSize); 680 if (native_call) { 681 __ sub(RextendedSP, Rstack_top, align_up(wordSize, StackAlignmentInBytes)); // reserve 1 slot for exception handling 682 } else { 683 __ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize)); 684 __ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes); 685 } 686 __ mov(SP, RextendedSP); 687 __ check_stack_top(); 688 689 // Load Rmdp 690 if (ProfileInterpreter) { 691 __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset())); 692 __ tst(Rtemp, Rtemp); 693 __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset())); 694 __ csel(Rmdp, ZR, Rtemp, eq); 695 } 696 697 // Load Rcache 698 __ ldr(Rtemp, Address(RconstMethod, ConstMethod::constants_offset())); 699 __ ldr(Rcache, Address(Rtemp, ConstantPool::cache_offset_in_bytes())); 700 // Get mirror and store it in the frame as GC root for this Method* 701 __ load_mirror(Rtemp, Rmethod, Rtemp); 702 703 // Build fixed frame 704 __ stp(Rstack_top, Rbcp, Address(FP, -10*wordSize)); 705 __ stp(Rlocals, Rcache, Address(FP, -8*wordSize)); 706 __ stp(Rmdp, Rtemp, Address(FP, -6*wordSize)); 707 __ stp(Rmethod, RextendedSP, Address(FP, -4*wordSize)); 708 __ stp(ZR, Rsender_sp, Address(FP, -2*wordSize)); 709 assert(frame::interpreter_frame_initial_sp_offset == -10, "interpreter frame broken"); 710 assert(frame::interpreter_frame_stack_top_offset == -2, "stack top broken"); 711 } 712 713 #else // AARCH64 714 715 // 716 // Generate a fixed interpreter frame. This is identical setup for interpreted methods 717 // and for native methods hence the shared code. 718 719 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { 720 // Generates the following stack layout: 721 // 722 // [ expr. 
stack bottom ]
723 //   [ saved Rbcp         ]
724 //   [ current Rlocals    ]
725 //   [ cache              ]
726 //   [ mdx                ]
727 //   [ Method*            ]
728 //   [ last_sp            ]
729 //   [ sender_sp          ]
730 //   [ saved FP           ] <--- FP
731 //   [ saved LR           ]
732
733   // initialize fixed part of activation frame
734   __ push(LR);          // save return address
735   __ push(FP);          // save FP
736   __ mov(FP, SP);       // establish new FP
737
738   __ push(Rsender_sp);
739
740   __ mov(R0, 0);
741   __ push(R0);          // leave last_sp as null
742
743   // setup Rbcp
744   if (native_call) {
745     __ mov(Rbcp, 0);    // bcp = 0 for native calls
746   } else {
747     __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); // get ConstMethod*
748     __ add(Rbcp, Rtemp, ConstMethod::codes_offset());        // get codebase
749   }
750
751   __ push(Rmethod);     // save Method*
752   // Get mirror and store it in the frame as GC root for this Method*
753   __ load_mirror(Rtemp, Rmethod, Rtemp);
754   __ push(Rtemp);
755
756   if (ProfileInterpreter) {
757     __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
758     __ tst(Rtemp, Rtemp);
759     __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()), ne);
760     __ push(Rtemp);     // set the mdp (method data pointer)
761   } else {
762     __ push(R0);
763   }
764
765   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
766   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
767   __ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
768   __ push(Rtemp);       // set constant pool cache
769   __ push(Rlocals);     // set locals pointer
770   __ push(Rbcp);        // set bcp
771   __ push(R0);          // reserve word for pointer to expression stack bottom
772   __ str(SP, Address(SP, 0)); // set expression stack bottom
773 }
774
775 #endif // AARCH64
776
777 // End of helpers
778
779 //------------------------------------------------------------------------------------------------------------------------
780 // Entry points
781 //
782 // Here we generate the various kinds of entries into the interpreter.
783 // The two main entry types are generic bytecode methods and native call methods.
784 // These both come in synchronized and non-synchronized versions but the
785 // frame layout they create is very similar. The other method entry
786 // types are really just special-purpose entries that are really entry
787 // and interpretation all in one. These are for trivial methods like
788 // accessor, empty, or special math methods.
789 //
790 // When control flow reaches any of the entry types for the interpreter
791 // the following holds ->
792 //
793 // Arguments:
794 //
795 //   Rmethod: Method*
796 //   Rthread: thread
797 //   Rsender_sp: sender sp
798 //   Rparams (SP on 32-bit ARM): pointer to method parameters
799 //
800 //   LR: return address
801 //
802 // Stack layout immediately at entry
803 //
804 // [ optional padding(*)] <--- SP (AArch64)
805 // [ parameter n        ] <--- Rparams (SP on 32-bit ARM)
806 //   ...
807 // [ parameter 1        ]
808 // [ expression stack   ] (caller's Java expression stack)
809
810 // Assuming that we don't go to one of the trivial specialized
811 // entries the stack will look like below when we are ready to execute
812 // the first bytecode (or call the native routine). The register usage
813 // will be as the template based interpreter expects.
814 //
815 // local variables follow incoming parameters immediately; i.e.
816 // the return address is saved at the end of the locals.
817 //
818 // [ reserved stack (*) ] <--- SP (AArch64)
819 // [ expr. stack        ] <--- Rstack_top (SP on 32-bit ARM)
820 // [ monitor entry      ]
821 //   ...
822 // [ monitor entry      ]
823 // [ expr. stack bottom ]
824 // [ saved Rbcp         ]
825 // [ current Rlocals    ]
826 // [ cache              ]
827 // [ mdx                ]
828 // [ mirror             ]
829 // [ Method*            ]
830 //
831 // 32-bit ARM:
832 // [ last_sp            ]
833 //
834 // AArch64:
835 // [ extended SP (*)    ]
836 // [ stack top (*)      ]
837 //
838 // [ sender_sp          ]
839 // [ saved FP           ] <--- FP
840 // [ saved LR           ]
841 // [ optional padding(*)]
842 // [ local variable m   ]
843 //   ...
844 // [ local variable 1   ]
845 // [ parameter n        ]
846 //   ...
847 // [ parameter 1        ] <--- Rlocals
848 //
849 // (*) - AArch64 only
850 //
851
852 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
853   // Code: _aload_0, _getfield, _areturn
854   // parameter size = 1
855   //
856   // The code that gets generated by this routine is split into 2 parts:
857   //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
858   //    2. The slow path - which is an expansion of the regular method entry.
859   //
860   // Notes:
861   //  * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
862   //  * We may jump to the slow path iff the receiver is null. If the
863   //    Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
864   //    Thus we can use the regular method entry code to generate the NPE.
865   //
866   // Rmethod: Method*
867   // Rthread: thread
868   // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
869   // Rparams: parameters
870
871   address entry = __ pc();
872   Label slow_path;
873   const Register Rthis = R0;
874   const Register Rret_addr = Rtmp_save1;
875   assert_different_registers(Rthis, Rret_addr, Rsender_sp);
876
877   const int referent_offset = java_lang_ref_Reference::referent_offset;
878   guarantee(referent_offset > 0, "referent offset not initialized");
879
880   // Check if local 0 != NULL
881   // If the receiver is null then it is OK to jump to the slow path.
882   __ ldr(Rthis, Address(Rparams));
883   __ cbz(Rthis, slow_path);
884
885   // Preserve LR
886   __ mov(Rret_addr, LR);
887
888   // Load the value of the referent field.
889   const Address field_address(Rthis, referent_offset);
890   __ load_heap_oop(R0, field_address, Rtemp, R1_tmp, R2_tmp, ON_WEAK_OOP_REF);
891
892   // _areturn
893   __ mov(SP, Rsender_sp);
894   __ ret(Rret_addr);
895
896   // generate a vanilla interpreter entry as the slow path
897   __ bind(slow_path);
898   __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
899   return entry;
900 }
901
902 // Not supported
903 address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return NULL; }
904 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
905 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
906
907 //
908 // Interpreter stub for calling a native method. (asm interpreter)
909 // This sets up a somewhat different looking stack for calling the native method
910 // than the typical interpreter frame setup.
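// (Illustrative summary, assumption - not generated code.) In C-like terms the
// stub generated below behaves roughly as:
//   sig_handler(...);                          // marshal Java args to the C ABI
//   thread->set_state(_thread_in_native);
//   result = (*native_function)(env, ...);     // the actual JNI call
//   thread->set_state(_thread_in_native_trans);
//   safepoint_and_suspend_check();             // may block
//   thread->set_state(_thread_in_Java);
//   result_handler(result);                    // unbox / canonicalize result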
911 // 912 913 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { 914 // determine code generation flags 915 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; 916 917 // Incoming registers: 918 // 919 // Rmethod: Method* 920 // Rthread: thread 921 // Rsender_sp: sender sp 922 // Rparams: parameters 923 924 address entry_point = __ pc(); 925 926 // Register allocation 927 const Register Rsize_of_params = AARCH64_ONLY(R20) NOT_AARCH64(R6); 928 const Register Rsig_handler = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0 /* R4 */); 929 const Register Rnative_code = AARCH64_ONLY(R22) NOT_AARCH64(Rtmp_save1 /* R5 */); 930 const Register Rresult_handler = AARCH64_ONLY(Rsig_handler) NOT_AARCH64(R6); 931 932 #ifdef AARCH64 933 const Register RconstMethod = R10; // also used in generate_fixed_frame (should match) 934 const Register Rsaved_result = Rnative_code; 935 const FloatRegister Dsaved_result = V8; 936 #else 937 const Register Rsaved_result_lo = Rtmp_save0; // R4 938 const Register Rsaved_result_hi = Rtmp_save1; // R5 939 FloatRegister saved_result_fp; 940 #endif // AARCH64 941 942 943 #ifdef AARCH64 944 __ ldr(RconstMethod, Address(Rmethod, Method::const_offset())); 945 __ ldrh(Rsize_of_params, Address(RconstMethod, ConstMethod::size_of_parameters_offset())); 946 #else 947 __ ldr(Rsize_of_params, Address(Rmethod, Method::const_offset())); 948 __ ldrh(Rsize_of_params, Address(Rsize_of_params, ConstMethod::size_of_parameters_offset())); 949 #endif // AARCH64 950 951 // native calls don't need the stack size check since they have no expression stack 952 // and the arguments are already on the stack and we only add a handful of words 953 // to the stack 954 955 // compute beginning of parameters (Rlocals) 956 __ sub(Rlocals, Rparams, wordSize); 957 __ add(Rlocals, Rlocals, AsmOperand(Rsize_of_params, lsl, Interpreter::logStackElementSize)); 958 959 #ifdef AARCH64 960 int extra_stack_reserve = 2*wordSize; // extra space for oop_temp 961 if(__ can_post_interpreter_events()) { 962 // extra space for saved results 963 extra_stack_reserve += 2*wordSize; 964 } 965 // reserve extra stack space and nullify oop_temp slot 966 __ stp(ZR, ZR, Address(SP, -extra_stack_reserve, pre_indexed)); 967 #else 968 // reserve stack space for oop_temp 969 __ mov(R0, 0); 970 __ push(R0); 971 #endif // AARCH64 972 973 generate_fixed_frame(true); // Note: R9 is now saved in the frame 974 975 // make sure method is native & not abstract 976 #ifdef ASSERT 977 __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset())); 978 { 979 Label L; 980 __ tbnz(Rtemp, JVM_ACC_NATIVE_BIT, L); 981 __ stop("tried to execute non-native method as native"); 982 __ bind(L); 983 } 984 { Label L; 985 __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L); 986 __ stop("tried to execute abstract method in interpreter"); 987 __ bind(L); 988 } 989 #endif 990 991 // increment invocation count & check for overflow 992 Label invocation_counter_overflow; 993 if (inc_counter) { 994 if (synchronized) { 995 // Avoid unlocking method's monitor in case of exception, as it has not 996 // been locked yet. 
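      // (Assumption, descriptive.) The per-thread flag set below is consulted
      // on the exception-unwind path, roughly:
      //   if (thread->do_not_unlock_if_synchronized()) skip_monitor_unlock();
      // so a monitor that was never acquired is not erroneously released.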
997       __ set_do_not_unlock_if_synchronized(true, Rtemp);
998     }
999     generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
1000   }
1001
1002   Label continue_after_compile;
1003   __ bind(continue_after_compile);
1004
1005   if (inc_counter && synchronized) {
1006     __ set_do_not_unlock_if_synchronized(false, Rtemp);
1007   }
1008
1009   // check for synchronized methods
1010   // Must happen AFTER invocation_counter check and stack overflow check,
1011   // so the method is not locked if the counter overflows.
1012   //
1013   if (synchronized) {
1014     lock_method();
1015   } else {
1016     // no synchronization necessary
1017 #ifdef ASSERT
1018     { Label L;
1019       __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
1020       __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
1021       __ stop("method needs synchronization");
1022       __ bind(L);
1023     }
1024 #endif
1025   }
1026
1027   // start execution
1028 #ifdef ASSERT
1029   { Label L;
1030     __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
1031     __ cmp(Rtemp, Rstack_top);
1032     __ b(L, eq);
1033     __ stop("broken stack frame setup in interpreter");
1034     __ bind(L);
1035   }
1036 #endif
1037   __ check_extended_sp(Rtemp);
1038
1039   // jvmti/dtrace support
1040   __ notify_method_entry();
1041 #if R9_IS_SCRATCHED
1042   __ restore_method();
1043 #endif
1044
1045   {
1046     Label L;
1047     __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
1048     __ cbnz(Rsig_handler, L);
1049     __ mov(R1, Rmethod);
1050     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1, true);
1051     __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
1052     __ bind(L);
1053   }
1054
1055   {
1056     Label L;
1057     __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
1058     __ cbnz(Rnative_code, L);
1059     __ mov(R1, Rmethod);
1060     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1);
1061     __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
1062     __ bind(L);
1063   }
1064
1065   // Allocate stack space for arguments
1066
1067 #ifdef AARCH64
1068   __ sub(Rtemp, SP, Rsize_of_params, ex_uxtw, LogBytesPerWord);
1069   __ align_reg(SP, Rtemp, StackAlignmentInBytes);
1070
1071   // Allocate more stack space to accommodate all arguments passed on GP and FP registers:
1072   // 8 * wordSize for GPRs
1073   // 8 * wordSize for FPRs
1074   int reg_arguments = align_up(8*wordSize + 8*wordSize, StackAlignmentInBytes);
1075 #else
1076
1077   // C functions need aligned stack
1078   __ bic(SP, SP, StackAlignmentInBytes - 1);
1079   // Multiply by BytesPerLong instead of BytesPerWord, because calling convention
1080   // may require empty slots due to long alignment, e.g. func(int, jlong, int, jlong)
1081   __ sub(SP, SP, AsmOperand(Rsize_of_params, lsl, LogBytesPerLong));
1082
1083 #ifdef __ABI_HARD__
1084   // Allocate more stack space to accommodate all GP as well as FP registers:
1085   // 4 * wordSize
1086   // 8 * BytesPerLong
1087   int reg_arguments = align_up((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
1088 #else
1089   // Reserve at least 4 words on the stack for loading
1090   // of parameters passed on registers (R0-R3).
1091   // See generate_slow_signature_handler().
1092   // It is also used for JNIEnv & class additional parameters.
1093   int reg_arguments = 4 * wordSize;
1094 #endif // __ABI_HARD__
1095 #endif // AARCH64
1096
1097   __ sub(SP, SP, reg_arguments);
1098
1099
1100   // Note: signature handler blows R4 (32-bit ARM) or R21 (AArch64) besides all scratch registers.
1101 // See AbstractInterpreterGenerator::generate_slow_signature_handler(). 1102 __ call(Rsig_handler); 1103 #if R9_IS_SCRATCHED 1104 __ restore_method(); 1105 #endif 1106 __ mov(Rresult_handler, R0); 1107 1108 // Pass JNIEnv and mirror for static methods 1109 { 1110 Label L; 1111 __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset())); 1112 __ add(R0, Rthread, in_bytes(JavaThread::jni_environment_offset())); 1113 __ tbz(Rtemp, JVM_ACC_STATIC_BIT, L); 1114 __ load_mirror(Rtemp, Rmethod, Rtemp); 1115 __ add(R1, FP, frame::interpreter_frame_oop_temp_offset * wordSize); 1116 __ str(Rtemp, Address(R1, 0)); 1117 __ bind(L); 1118 } 1119 1120 __ set_last_Java_frame(SP, FP, true, Rtemp); 1121 1122 // Changing state to _thread_in_native must be the last thing to do 1123 // before the jump to native code. At this moment stack must be 1124 // safepoint-safe and completely prepared for stack walking. 1125 #ifdef ASSERT 1126 { 1127 Label L; 1128 __ ldr_u32(Rtemp, Address(Rthread, JavaThread::thread_state_offset())); 1129 __ cmp_32(Rtemp, _thread_in_Java); 1130 __ b(L, eq); 1131 __ stop("invalid thread state"); 1132 __ bind(L); 1133 } 1134 #endif 1135 1136 #ifdef AARCH64 1137 __ mov(Rtemp, _thread_in_native); 1138 __ add(Rtemp2, Rthread, in_bytes(JavaThread::thread_state_offset())); 1139 // STLR is used to force all preceding writes to be observed prior to thread state change 1140 __ stlr_w(Rtemp, Rtemp2); 1141 #else 1142 // Force all preceding writes to be observed prior to thread state change 1143 __ membar(MacroAssembler::StoreStore, Rtemp); 1144 1145 __ mov(Rtemp, _thread_in_native); 1146 __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset())); 1147 #endif // AARCH64 1148 1149 __ call(Rnative_code); 1150 #if R9_IS_SCRATCHED 1151 __ restore_method(); 1152 #endif 1153 1154 // Set FPSCR/FPCR to a known state 1155 if (AlwaysRestoreFPU) { 1156 __ restore_default_fp_mode(); 1157 } 1158 1159 // Do safepoint check 1160 __ mov(Rtemp, _thread_in_native_trans); 1161 __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset())); 1162 1163 // Force this write out before the read below 1164 __ membar(MacroAssembler::StoreLoad, Rtemp); 1165 1166 __ ldr_global_s32(Rtemp, SafepointSynchronize::address_of_state()); 1167 1168 // Protect the return value in the interleaved code: save it to callee-save registers. 
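  // (Assumption, descriptive.) R0/R1 and D0 are caller-saved in the ARM
  // calling conventions, so the runtime calls that may follow (safepoint
  // processing, stack reguarding) could clobber them; copying into
  // callee-saved registers keeps the native result alive across those calls.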
1169 #ifdef AARCH64 1170 __ mov(Rsaved_result, R0); 1171 __ fmov_d(Dsaved_result, D0); 1172 #else 1173 __ mov(Rsaved_result_lo, R0); 1174 __ mov(Rsaved_result_hi, R1); 1175 #ifdef __ABI_HARD__ 1176 // preserve native FP result in a callee-saved register 1177 saved_result_fp = D8; 1178 __ fcpyd(saved_result_fp, D0); 1179 #else 1180 saved_result_fp = fnoreg; 1181 #endif // __ABI_HARD__ 1182 #endif // AARCH64 1183 1184 { 1185 __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset())); 1186 __ cmp(Rtemp, SafepointSynchronize::_not_synchronized); 1187 __ cond_cmp(R3, 0, eq); 1188 1189 #ifdef AARCH64 1190 Label L; 1191 __ b(L, eq); 1192 __ mov(R0, Rthread); 1193 __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none); 1194 __ bind(L); 1195 #else 1196 __ mov(R0, Rthread, ne); 1197 __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none, ne); 1198 #if R9_IS_SCRATCHED 1199 __ restore_method(); 1200 #endif 1201 #endif // AARCH64 1202 } 1203 1204 // Perform Native->Java thread transition 1205 __ mov(Rtemp, _thread_in_Java); 1206 __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset())); 1207 1208 // Zero handles and last_java_sp 1209 __ reset_last_Java_frame(Rtemp); 1210 __ ldr(R3, Address(Rthread, JavaThread::active_handles_offset())); 1211 __ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset_in_bytes())); 1212 if (CheckJNICalls) { 1213 __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset())); 1214 } 1215 1216 // Unbox oop result, e.g. JNIHandles::resolve result if it's an oop. 1217 { 1218 Label Lnot_oop; 1219 #ifdef AARCH64 1220 __ mov_slow(Rtemp, AbstractInterpreter::result_handler(T_OBJECT)); 1221 __ cmp(Rresult_handler, Rtemp); 1222 __ b(Lnot_oop, ne); 1223 #else // !AARCH64 1224 // For ARM32, Rresult_handler is -1 for oop result, 0 otherwise. 1225 __ cbz(Rresult_handler, Lnot_oop); 1226 #endif // !AARCH64 1227 Register value = AARCH64_ONLY(Rsaved_result) NOT_AARCH64(Rsaved_result_lo); 1228 __ resolve_jobject(value, // value 1229 Rtemp, // tmp1 1230 R1_tmp); // tmp2 1231 // Store resolved result in frame for GC visibility. 1232 __ str(value, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize)); 1233 __ bind(Lnot_oop); 1234 } 1235 1236 #ifdef AARCH64 1237 // Restore SP (drop native parameters area), to keep SP in sync with extended_sp in frame 1238 __ restore_sp_after_call(Rtemp); 1239 __ check_stack_top(); 1240 #endif // AARCH64 1241 1242 // reguard stack if StackOverflow exception happened while in native. 
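  // (Illustrative sketch, assumption - not generated code.) The check below is
  // equivalent to:
  //   if (thread->stack_guard_state() == stack_guard_yellow_reserved_disabled)
  //     SharedRuntime::reguard_yellow_pages();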
1243 { 1244 __ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset())); 1245 __ cmp_32(Rtemp, JavaThread::stack_guard_yellow_reserved_disabled); 1246 #ifdef AARCH64 1247 Label L; 1248 __ b(L, ne); 1249 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none); 1250 __ bind(L); 1251 #else 1252 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq); 1253 #if R9_IS_SCRATCHED 1254 __ restore_method(); 1255 #endif 1256 #endif // AARCH64 1257 } 1258 1259 // check pending exceptions 1260 { 1261 __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset())); 1262 #ifdef AARCH64 1263 Label L; 1264 __ cbz(Rtemp, L); 1265 __ mov_pc_to(Rexception_pc); 1266 __ b(StubRoutines::forward_exception_entry()); 1267 __ bind(L); 1268 #else 1269 __ cmp(Rtemp, 0); 1270 __ mov(Rexception_pc, PC, ne); 1271 __ b(StubRoutines::forward_exception_entry(), ne); 1272 #endif // AARCH64 1273 } 1274 1275 if (synchronized) { 1276 // address of first monitor 1277 __ sub(R1, FP, - (frame::interpreter_frame_monitor_block_bottom_offset - frame::interpreter_frame_monitor_size()) * wordSize); 1278 __ unlock_object(R1); 1279 } 1280 1281 // jvmti/dtrace support 1282 // Note: This must happen _after_ handling/throwing any exceptions since 1283 // the exception handler code notifies the runtime of method exits 1284 // too. If this happens before, method entry/exit notifications are 1285 // not properly paired (was bug - gri 11/22/99). 1286 #ifdef AARCH64 1287 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result, noreg, Dsaved_result); 1288 #else 1289 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result_lo, Rsaved_result_hi, saved_result_fp); 1290 #endif // AARCH64 1291 1292 // Restore the result. Oop result is restored from the stack. 
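  // (Illustrative sketch, assumption - not generated code.) On 32-bit ARM the
  // conditional moves below select:
  //   R0 = (Rresult_handler != 0) ? oop_temp_slot    // possibly updated by GC
  //                               : Rsaved_result_lo;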
1293 #ifdef AARCH64 1294 __ mov(R0, Rsaved_result); 1295 __ fmov_d(D0, Dsaved_result); 1296 1297 __ blr(Rresult_handler); 1298 #else 1299 __ cmp(Rresult_handler, 0); 1300 __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize), ne); 1301 __ mov(R0, Rsaved_result_lo, eq); 1302 __ mov(R1, Rsaved_result_hi); 1303 1304 #ifdef __ABI_HARD__ 1305 // reload native FP result 1306 __ fcpyd(D0, D8); 1307 #endif // __ABI_HARD__ 1308 1309 #ifdef ASSERT 1310 if (VerifyOops) { 1311 Label L; 1312 __ cmp(Rresult_handler, 0); 1313 __ b(L, eq); 1314 __ verify_oop(R0); 1315 __ bind(L); 1316 } 1317 #endif // ASSERT 1318 #endif // AARCH64 1319 1320 // Restore FP/LR, sender_sp and return 1321 #ifdef AARCH64 1322 __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize)); 1323 __ ldp(FP, LR, Address(FP)); 1324 __ mov(SP, Rtemp); 1325 #else 1326 __ mov(Rtemp, FP); 1327 __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR)); 1328 __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize)); 1329 #endif // AARCH64 1330 1331 __ ret(); 1332 1333 if (inc_counter) { 1334 // Handle overflow of counter and compile method 1335 __ bind(invocation_counter_overflow); 1336 generate_counter_overflow(continue_after_compile); 1337 } 1338 1339 return entry_point; 1340 } 1341 1342 // 1343 // Generic interpreted method entry to (asm) interpreter 1344 // 1345 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { 1346 // determine code generation flags 1347 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; 1348 1349 // Rmethod: Method* 1350 // Rthread: thread 1351 // Rsender_sp: sender sp (could differ from SP if we were called via c2i) 1352 // Rparams: pointer to the last parameter in the stack 1353 1354 address entry_point = __ pc(); 1355 1356 const Register RconstMethod = AARCH64_ONLY(R10) NOT_AARCH64(R3); 1357 1358 #ifdef AARCH64 1359 const Register RmaxStack = R11; 1360 const Register RlocalsBase = R12; 1361 #endif // AARCH64 1362 1363 __ ldr(RconstMethod, Address(Rmethod, Method::const_offset())); 1364 1365 __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset())); 1366 __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset())); 1367 1368 // setup Rlocals 1369 __ sub(Rlocals, Rparams, wordSize); 1370 __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize)); 1371 1372 __ sub(R3, R3, R2); // number of additional locals 1373 1374 #ifdef AARCH64 1375 // setup RmaxStack 1376 __ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset())); 1377 // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots, 1378 // none of which are at the same time, so we just need to make sure there is enough room 1379 // for the biggest user: 1380 // -reserved slot for exception handler 1381 // -reserved slots for JSR292. Method::extra_stack_entries() is the size. 1382 // -3 reserved slots so get_method_counters() can save some registers before call_VM(). 1383 __ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries())); 1384 #endif // AARCH64 1385 1386 // see if we've got enough room on the stack for locals plus overhead. 
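  // (Descriptive note, assumption.) At this point R3 holds
  // size_of_locals - size_of_parameters, i.e. the number of local slots that
  // must still be allocated (and zeroed) beyond the incoming arguments;
  // generate_stack_overflow_check() below treats it as "additional locals".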
1387   generate_stack_overflow_check();
1388
1389 #ifdef AARCH64
1390
1391   // allocate space for locals
1392   {
1393     __ sub(RlocalsBase, Rparams, AsmOperand(R3, lsl, Interpreter::logStackElementSize));
1394     __ align_reg(SP, RlocalsBase, StackAlignmentInBytes);
1395   }
1396
1397   // explicitly initialize locals
1398   {
1399     Label zero_loop, done;
1400     __ cbz(R3, done);
1401
1402     __ tbz(R3, 0, zero_loop);
1403     __ subs(R3, R3, 1);
1404     __ str(ZR, Address(RlocalsBase, wordSize, post_indexed));
1405     __ b(done, eq);
1406
1407     __ bind(zero_loop);
1408     __ subs(R3, R3, 2);
1409     __ stp(ZR, ZR, Address(RlocalsBase, 2*wordSize, post_indexed));
1410     __ b(zero_loop, ne);
1411
1412     __ bind(done);
1413   }
1414
1415 #else
1416   // allocate space for locals
1417   // explicitly initialize locals
1418
1419   // Loop is unrolled 4 times
1420   Label loop;
1421   __ mov(R0, 0);
1422   __ bind(loop);
1423
1424   // #1
1425   __ subs(R3, R3, 1);
1426   __ push(R0, ge);
1427
1428   // #2
1429   __ subs(R3, R3, 1, ge);
1430   __ push(R0, ge);
1431
1432   // #3
1433   __ subs(R3, R3, 1, ge);
1434   __ push(R0, ge);
1435
1436   // #4
1437   __ subs(R3, R3, 1, ge);
1438   __ push(R0, ge);
1439
1440   __ b(loop, gt);
1441 #endif // AARCH64
1442
1443   // initialize fixed part of activation frame
1444   generate_fixed_frame(false);
1445
1446   __ restore_dispatch();
1447
1448   // make sure method is not native & not abstract
1449 #ifdef ASSERT
1450   __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
1451   {
1452     Label L;
1453     __ tbz(Rtemp, JVM_ACC_NATIVE_BIT, L);
1454     __ stop("tried to execute native method as non-native");
1455     __ bind(L);
1456   }
1457   { Label L;
1458     __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
1459     __ stop("tried to execute abstract method in interpreter");
1460     __ bind(L);
1461   }
1462 #endif
1463
1464   // increment invocation count & check for overflow
1465   Label invocation_counter_overflow;
1466   Label profile_method;
1467   Label profile_method_continue;
1468   if (inc_counter) {
1469     if (synchronized) {
1470       // Avoid unlocking method's monitor in case of exception, as it has not
1471       // been locked yet.
1472       __ set_do_not_unlock_if_synchronized(true, Rtemp);
1473     }
1474     generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
1475     if (ProfileInterpreter) {
1476       __ bind(profile_method_continue);
1477     }
1478   }
1479   Label continue_after_compile;
1480   __ bind(continue_after_compile);
1481
1482   if (inc_counter && synchronized) {
1483     __ set_do_not_unlock_if_synchronized(false, Rtemp);
1484   }
1485 #if R9_IS_SCRATCHED
1486   __ restore_method();
1487 #endif
1488
1489   // check for synchronized methods
1490   // Must happen AFTER invocation_counter check and stack overflow check,
1491   // so the method is not locked if the counter overflows.
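  // (Illustrative sketch, assumption - not generated code.) lock_method()
  // pushes one BasicObjectLock onto the monitor area of the frame, roughly:
  //   monitor->obj = is_static ? mirror : local0 /* the receiver */;
  //   lock_object(monitor);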
1492   //
1493   if (synchronized) {
1494     // Allocate monitor and lock method
1495     lock_method();
1496   } else {
1497     // no synchronization necessary
1498 #ifdef ASSERT
1499     { Label L;
1500       __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
1501       __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
1502       __ stop("method needs synchronization");
1503       __ bind(L);
1504     }
1505 #endif
1506   }
1507
1508   // start execution
1509 #ifdef ASSERT
1510   { Label L;
1511     __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
1512     __ cmp(Rtemp, Rstack_top);
1513     __ b(L, eq);
1514     __ stop("broken stack frame setup in interpreter");
1515     __ bind(L);
1516   }
1517 #endif
1518   __ check_extended_sp(Rtemp);
1519
1520   // jvmti support
1521   __ notify_method_entry();
1522 #if R9_IS_SCRATCHED
1523   __ restore_method();
1524 #endif
1525
1526   __ dispatch_next(vtos);
1527
1528   // invocation counter overflow
1529   if (inc_counter) {
1530     if (ProfileInterpreter) {
1531       // We have decided to profile this method in the interpreter
1532       __ bind(profile_method);
1533
1534       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1535       __ set_method_data_pointer_for_bcp();
1536
1537       __ b(profile_method_continue);
1538     }
1539
1540     // Handle overflow of counter and compile method
1541     __ bind(invocation_counter_overflow);
1542     generate_counter_overflow(continue_after_compile);
1543   }
1544
1545   return entry_point;
1546 }
1547
1548 //------------------------------------------------------------------------------------------------------------------------
1549 // Exceptions
1550
1551 void TemplateInterpreterGenerator::generate_throw_exception() {
1552   // Entry point in previous activation (i.e., if the caller was interpreted)
1553   Interpreter::_rethrow_exception_entry = __ pc();
1554   // Rexception_obj: exception
1555
1556 #ifndef AARCH64
1557   // Clear interpreter_frame_last_sp.
1558   __ mov(Rtemp, 0);
1559   __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1560 #endif // !AARCH64
1561
1562 #if R9_IS_SCRATCHED
1563   __ restore_method();
1564 #endif
1565   __ restore_bcp();
1566   __ restore_dispatch();
1567   __ restore_locals();
1568
1569 #ifdef AARCH64
1570   __ restore_sp_after_call(Rtemp);
1571 #endif // AARCH64
1572
1573   // Entry point for exceptions thrown within interpreter code
1574   Interpreter::_throw_exception_entry = __ pc();
1575
1576   // expression stack is undefined here
1577   // Rexception_obj: exception
1578   // Rbcp: exception bcp
1579   __ verify_oop(Rexception_obj);
1580
1581   // expression stack must be empty before entering the VM in case of an exception
1582   __ empty_expression_stack();
1583   // find exception handler address and preserve exception oop
1584   __ mov(R1, Rexception_obj);
1585   __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), R1);
1586   // R0: exception handler entry point
1587   // Rexception_obj: preserved exception oop
1588   // Rbcp: bcp for exception handler
1589   __ push_ptr(Rexception_obj);  // push exception which is now the only value on the stack
1590   __ jump(R0);                  // jump to exception handler (may be _remove_activation_entry!)
1591
1592   // If the exception is not handled in the current frame the frame is removed and
1593   // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
1594   //
1595   // Note: At this point the bci is still the bci for the instruction which caused
1596   // the exception and the expression stack is empty.
Thus, for any VM calls 1597 // at this point, GC will find a legal oop map (with empty expression stack). 1598 1599 // In current activation 1600 // tos: exception 1601 // Rbcp: exception bcp 1602 1603 // 1604 // JVMTI PopFrame support 1605 // 1606 Interpreter::_remove_activation_preserving_args_entry = __ pc(); 1607 1608 #ifdef AARCH64 1609 __ restore_sp_after_call(Rtemp); // restore SP to extended SP 1610 #endif // AARCH64 1611 1612 __ empty_expression_stack(); 1613 1614 // Set the popframe_processing bit in _popframe_condition indicating that we are 1615 // currently handling popframe, so that call_VMs that may happen later do not trigger new 1616 // popframe handling cycles. 1617 1618 __ ldr_s32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset())); 1619 __ orr(Rtemp, Rtemp, (unsigned)JavaThread::popframe_processing_bit); 1620 __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset())); 1621 1622 { 1623 // Check to see whether we are returning to a deoptimized frame. 1624 // (The PopFrame call ensures that the caller of the popped frame is 1625 // either interpreted or compiled and deoptimizes it if compiled.) 1626 // In this case, we can't call dispatch_next() after the frame is 1627 // popped, but instead must save the incoming arguments and restore 1628 // them after deoptimization has occurred. 1629 // 1630 // Note that we don't compare the return PC against the 1631 // deoptimization blob's unpack entry because of the presence of 1632 // adapter frames in C2. 1633 Label caller_not_deoptimized; 1634 __ ldr(R0, Address(FP, frame::return_addr_offset * wordSize)); 1635 __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), R0); 1636 __ cbnz_32(R0, caller_not_deoptimized); 1637 #ifdef AARCH64 1638 __ NOT_TESTED(); 1639 #endif 1640 1641 // Compute size of arguments for saving when returning to deoptimized caller 1642 __ restore_method(); 1643 __ ldr(R0, Address(Rmethod, Method::const_offset())); 1644 __ ldrh(R0, Address(R0, ConstMethod::size_of_parameters_offset())); 1645 1646 __ logical_shift_left(R1, R0, Interpreter::logStackElementSize); 1647 // Save these arguments 1648 __ restore_locals(); 1649 __ sub(R2, Rlocals, R1); 1650 __ add(R2, R2, wordSize); 1651 __ mov(R0, Rthread); 1652 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R0, R1, R2); 1653 1654 __ remove_activation(vtos, LR, 1655 /* throw_monitor_exception */ false, 1656 /* install_monitor_exception */ false, 1657 /* notify_jvmdi */ false); 1658 1659 // Inform deoptimization that it is responsible for restoring these arguments 1660 __ mov(Rtemp, JavaThread::popframe_force_deopt_reexecution_bit); 1661 __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset())); 1662 1663 // Continue in deoptimization handler 1664 __ ret(); 1665 1666 __ bind(caller_not_deoptimized); 1667 } 1668 1669 __ remove_activation(vtos, R4, 1670 /* throw_monitor_exception */ false, 1671 /* install_monitor_exception */ false, 1672 /* notify_jvmdi */ false); 1673 1674 #ifndef AARCH64 1675 // Finish with popframe handling 1676 // A previous I2C followed by a deoptimization might have moved the 1677 // outgoing arguments further up the stack. 
#ifndef AARCH64
  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(R1, SP);
  __ ldr(R2, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(SP, FP, true, Rtemp);
  __ mov(R0, Rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), R0, R1, R2);
  __ reset_last_Java_frame(Rtemp);
#endif // !AARCH64

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
  __ restore_stack_top();
#else
  // Restore the last_sp and null it out
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();
  __ restore_method();

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  assert(JavaThread::popframe_inactive == 0, "adjust this code");
  __ str_32(__ zero_register(Rtemp), Address(Rthread, JavaThread::popframe_condition_offset()));
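#if 0
  // Illustrative sketch only (excluded from compilation): the popframe
  // condition word manipulated above is a small per-thread bit set. The bit
  // values below are hypothetical stand-ins; the real constants live in
  // JavaThread.
  enum SketchPopframeBits {
    sketch_popframe_inactive                    = 0,      // cleared state, stored just above
    sketch_popframe_processing_bit              = 1 << 0, // set at entry so nested call_VMs don't re-trigger popframe
    sketch_popframe_force_deopt_reexecution_bit = 1 << 1  // tells deoptimization to restore the preserved arguments
  };
  static int sketch_mark_popframe_processing(int condition) {
    return condition | sketch_popframe_processing_bit;    // the ldr_s32/orr/str_32 sequence at the entry point
  }
#endif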
#if INCLUDE_JVMTI
  {
    Label L_done;

    __ ldrb(Rtemp, Address(Rbcp, 0));
    __ cmp(Rtemp, Bytecodes::_invokestatic);
    __ b(L_done, ne);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    // get local0
    __ ldr(R1, Address(Rlocals, 0));
    __ mov(R2, Rmethod);
    __ mov(R3, Rbcp);
    __ call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R1, R2, R3);

    __ cbz(R0, L_done);

    __ str(R0, Address(Rstack_top));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(R0_tos);
  __ str(R0_tos, Address(Rthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on IllegalMonitorStateExceptions)
  __ remove_activation(vtos, Rexception_pc, false, true, false);
  // restore exception
  __ get_vm_result(Rexception_obj, Rtemp);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects
  // the following registers set up:
  //
  // Rexception_obj: exception
  // Rexception_pc: return address/pc that threw exception
  // SP: expression stack of caller
  // FP: frame pointer of caller
  __ mov(c_rarg0, Rthread);
  __ mov(c_rarg1, Rexception_pc);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);
  // Note that an "issuing PC" is actually the next PC after the call

  __ jump(R0); // jump to exception handler of caller
}
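#if 0
// Illustrative sketch only (excluded from compilation): the park/reload
// pattern used at _remove_activation_entry above. The exception oop cannot
// stay in a register across remove_activation, so it is stashed in a
// thread-local slot and reloaded afterwards. The struct and names are
// hypothetical stand-ins for the real JavaThread slot.
struct SketchThread { void* vm_result; };

static void sketch_park_exception(SketchThread* t, void* exception_oop) {
  t->vm_result = exception_oop; // str R0_tos -> vm_result_offset()
}

static void* sketch_reload_exception(SketchThread* t) {
  void* oop = t->vm_result;     // get_vm_result loads the oop ...
  t->vm_result = NULL;          // ... and clears the slot
  return oop;
}
#endif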
//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp); // restore SP to extended SP
#endif // AARCH64

  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  __ empty_expression_stack();

  __ load_earlyret_value(state);

  // Clear the earlyret state
  __ ldr(Rtemp, Address(Rthread, JavaThread::jvmti_thread_state_offset()));

  assert(JvmtiThreadState::earlyret_inactive == 0, "adjust this code");
  __ str_32(__ zero_register(R2), Address(Rtemp, JvmtiThreadState::earlyret_state_offset()));

  __ remove_activation(state, LR,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */

#ifndef AARCH64
  // According to interpreter calling conventions, result is returned in R0/R1,
  // so ftos (S0) and dtos (D0) are moved to R0/R1.
  // This conversion should be done after remove_activation, as it uses
  // push(state) & pop(state) to preserve return value.
  __ convert_tos_to_retval(state);
#endif // !AARCH64

  __ ret();

  return entry;
} // end of ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;

#ifdef __SOFTFP__
  dep = __ pc();                // fall through
#else
  fep = __ pc(); __ push(ftos); __ b(L);
  dep = __ pc(); __ push(dtos); __ b(L);
#endif // __SOFTFP__

  lep = __ pc(); __ push(ltos); __ b(L);

  if (AARCH64_ONLY(true) NOT_AARCH64(VerifyOops)) { // can't share atos entry with itos on AArch64 or if VerifyOops
    aep = __ pc(); __ push(atos); __ b(L);
  } else {
    aep = __ pc();              // fall through
  }

#ifdef __SOFTFP__
  fep = __ pc();                // fall through
#endif // __SOFTFP__

  bep = cep = sep =             // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

//------------------------------------------------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ push(state);       // save tosca

  // pass tosca registers as arguments
  __ mov(R2, R0_tos);
#ifdef AARCH64
  __ mov(R3, ZR);
#else
  __ mov(R3, R1_tos_hi);
#endif // AARCH64
  __ mov(R1, LR);       // save return address

  // call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);

  __ mov(LR, R0);       // restore return address
  __ pop(state);        // restore tosca

  // return
  __ ret();

  return entry;
}


void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
}
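#if 0
// Illustrative sketch only (excluded from compilation): the index computation
// that histogram_bytecode_pair below performs in registers. Two adjacent
// bytecodes are packed into one counter index, log2_number_of_codes bits
// each: the previous bytecode shifts down into the low bits and the current
// bytecode occupies the high bits. Names are hypothetical.
static int sketch_pair_index(int previous_index, int current_bytecode, int log2_number_of_codes) {
  return (previous_index >> log2_number_of_codes) | (current_bytecode << log2_number_of_codes);
}
#endif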
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register Rindex_addr = R2_tmp;
  Label Lcontinue;
  InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);
  InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
  const Register Rcounters_addr = R2_tmp;
  const Register Rindex = R4_tmp;

  // calculate new index for counter:
  //   index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes)
  // (_index >> log2_number_of_codes) is the previous bytecode

  __ ldr_literal(Rindex_addr, Lindex);
  __ ldr_s32(Rindex, Address(Rindex_addr));
  __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
  __ str_32(Rindex, Address(Rindex_addr));

  // Rindex (R4) contains index of counter

  __ ldr_literal(Rcounters_addr, Lcounters);
  __ ldr_s32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
  __ adds_32(Rtemp, Rtemp, 1);
  __ b(Lcontinue, mi);  // avoid overflow
  __ str_32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));

  __ b(Lcontinue);

  __ bind_literal(Lindex);
  __ bind_literal(Lcounters);

  __ bind(Lcontinue);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  address trace_entry = Interpreter::trace_code(t->tos_in());
  __ call(trace_entry, relocInfo::none);
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label Lcontinue;
  const Register stop_at = R2_tmp;

  __ ldr_global_s32(Rtemp, (address) &BytecodeCounter::_counter_value);
  __ mov_slow(stop_at, StopInterpreterAt);

  // test bytecode counter
  __ cmp(Rtemp, stop_at);
  __ b(Lcontinue, ne);

  __ trace_state("stop_interpreter_at");
  __ breakpoint();

  __ bind(Lcontinue);
}
#endif // !PRODUCT