/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 180 * 1024;

#define __ _masm->

//------------------------------------------------------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // callee-save register for saving LR, shared with generate_native_entry
  const Register Rsaved_ret_addr = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0);

  __ mov(Rsaved_ret_addr, LR);

  __ mov(R1, Rmethod);
  __ mov(R2, Rlocals);
  __ mov(R3, SP);

#ifdef AARCH64
  // expand expr. stack and extended SP to avoid cutting SP in call_VM
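  // (Rstack_top and the frame's extended-SP slot are set to the current SP
  //  so that the call_VM below does not lower SP underneath the outgoing
  //  arguments area that slow_signature_handler fills in.)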
  __ mov(Rstack_top, SP);
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ check_stack_top();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), R1, R2, R3, false);

  __ ldp(ZR,      c_rarg1, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg2, c_rarg3, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg4, c_rarg5, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg6, c_rarg7, Address(SP, 2*wordSize, post_indexed));

  __ ldp_d(V0, V1, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V2, V3, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V4, V5, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V6, V7, Address(SP, 2*wordSize, post_indexed));
#else

  // Safer to save R9 (when scratched) since callers may have been
  // written assuming R9 survives. This is suboptimal but
  // probably not important for this slow case call site.
  // Note for R9 saving: slow_signature_handler may copy register
  // arguments above the current SP (passed as R3). It is safe for
  // call_VM to use push and pop to protect additional values on the
  // stack if needed.
  __ call_VM(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), true /* save R9 if needed*/);
  __ add(SP, SP, wordSize);     // Skip R0
  __ pop(RegisterSet(R1, R3));  // Load arguments passed in registers
#ifdef __ABI_HARD__
  // Few alternatives to an always-load-FP-registers approach:
  // - parse method signature to detect FP arguments
  // - keep a counter/flag on a stack indicating number of FP arguments in the method.
  // The latter has been originally implemented and tested but a conditional path could
  // eliminate any gain imposed by avoiding 8 double word loads.
  __ fldmiad(SP, FloatRegisterSet(D0, 8), writeback);
#endif // __ABI_HARD__
#endif // AARCH64

  __ ret(Rsaved_ret_addr);

  return entry;
}


//
// Various method entries (that c++ and asm interpreter agree upon)
//------------------------------------------------------------------------------------------------------------------------
//
//

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry_point = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
  __ restore_stack_top();
#endif

  __ empty_expression_stack();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  DEBUG_ONLY(STOP("generate_abstract_entry");) // Should not reach here
  return entry_point;
}

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  // TODO: ARM
  return NULL;

  address entry_point = __ pc();
  STOP("generate_math_entry");
  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
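  // (The explicit check is done by generate_stack_overflow_check() below,
  //  which branches to StubRoutines::throw_StackOverflowError_entry() when
  //  the frame would not fit.)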
  //
#ifdef ASSERT
  { Label L;
    __ sub(Rtemp, FP, - frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ cmp(SP, Rtemp);  // Rtemp = maximal SP for current FP,
                        // (stack grows negative)
    __ b(L, ls);        // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT

  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();

  // index is in R4_ArrayIndexOutOfBounds_index

  InlinedString Lname(name);

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  __ ldr_literal(R1, Lname);
  __ mov(R2, R4_ArrayIndexOutOfBounds_index);

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2);

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ should_not_reach_here();
  __ bind_literal(Lname);

  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is in R2_ClassCastException_obj

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  __ mov(R1, R2_ClassCastException_obj);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             R1);

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  InlinedString Lname(name);
  InlinedString Lmessage(message);

  if (pass_oop) {
    // object is at TOS
    __ pop_ptr(R2);
  }

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  __ ldr_literal(R1, Lname);

  if (pass_oop) {
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), R1, R2);
  } else {
    if (message != NULL) {
      __ ldr_literal(R2, Lmessage);
    } else {
      __ mov(R2, 0);
    }
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), R1, R2);
  }

  // throw exception
  __ b(Interpreter::throw_exception_entry());

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ bind_literal(Lname);
  if (!pass_oop && (message != NULL)) {
    __ bind_literal(Lmessage);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);  // Restore SP to extended SP
  __ restore_stack_top();
#else
  // Restore stack bottom in case i2c adjusted stack
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that SP is now tos until next java call
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  const Register Rcache = R2_tmp;
  const Register Rindex = R3_tmp;
  __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);

  __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
  __ ldrb(Rtemp, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ check_stack_top();
  __ add(Rstack_top, Rstack_top, AsmOperand(Rtemp, lsl, Interpreter::logStackElementSize));

#ifndef AARCH64
  __ convert_retval_to_tos(state);
#endif // !AARCH64

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);  // Restore SP to extended SP
  __ restore_stack_top();
#else
  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  // handle exceptions
  { Label L;
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
    __ cbz(Rtemp, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
#ifdef AARCH64
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN:
      __ tst(R0, 0xff);
      __ cset(R0, ne);
      break;
    case T_CHAR   : __ zero_extend(R0, R0, 16);  break;
    case T_BYTE   : __ sign_extend(R0, R0,  8);  break;
    case T_SHORT  : __ sign_extend(R0, R0, 16);  break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : // fall through
    case T_FLOAT  : // fall through
    case T_DOUBLE : /* nothing to do */          break;
    case T_OBJECT :
      // retrieve result from frame
      __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
      // and verify it
      __ verify_oop(R0);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();
  return entry;
#else
  // Result handlers are not used on 32-bit ARM
  // since the returned value is already in appropriate format.
  __ should_not_reach_here();  // to avoid empty code block

  // The result handler non-zero indicates an object is returned and this is
  // used in the native entry code.
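  // (The (-1) sentinel below is never executed as code. In
  //  generate_native_entry the handler is only tested for being non-zero,
  //  and with all bits set it also serves as an all-ones mask in the 'tst'
  //  that detects a non-NULL oop result on 32-bit ARM.)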
  return type == T_OBJECT ? (address)(-1) : NULL;
#endif // AARCH64
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);

  // load current bytecode
  __ ldrb(R3_bytecode, Address(Rbcp));
  __ dispatch_only_normal(vtos);
  return entry;
}


// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// In: Rmethod.
//
// Uses R0, R1, Rtemp.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow,
                                                         Label* profile_method,
                                                         Label* profile_method_continue) {
  Label done;
  const Register Rcounters = Rtemp;
  const Address invocation_counter(Rcounters,
                MethodCounters::invocation_counter_offset() +
                InvocationCounter::counter_offset());

  // Note: In tiered we increment either counters in MethodCounters* or
  // in MDO depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset()));
      __ cbz(R1_tmp, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(R1_tmp,
                    in_bytes(MethodData::invocation_counter_offset()) +
                    in_bytes(InvocationCounter::counter_offset()));
      const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    __ get_method_counters(Rmethod, Rcounters, done);
    const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(Rcounters,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());

    const Register Ricnt = R0_tmp;  // invocation counter
    const Register Rbcnt = R1_tmp;  // backedge counter

    __ get_method_counters(Rmethod, Rcounters, done);

    if (ProfileInterpreter) {
      const Register Riic = R1_tmp;
      __ ldr_s32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
      __ add(Riic, Riic, 1);
      __ str_32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
    }

    // Update standard invocation counters

    __ ldr_u32(Ricnt, invocation_counter);
    __ ldr_u32(Rbcnt, backedge_counter);

    __ add(Ricnt, Ricnt, InvocationCounter::count_increment);

#ifdef AARCH64
    __ andr(Rbcnt, Rbcnt, (unsigned int)InvocationCounter::count_mask_value);  // mask out the status bits
#else
    __ bic(Rbcnt, Rbcnt, ~InvocationCounter::count_mask_value);  // mask out the status bits
#endif // AARCH64

    __ str_32(Ricnt, invocation_counter);  // save invocation count
    __ add(Ricnt, Ricnt, Rbcnt);           // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call
    // BytecodeInterpreter only calls for native so code is elided.
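    // Ricnt now holds invocation_count + backedge_count; the limit checks
    // below compare this combined count against the profiling and
    // compilation thresholds.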

    if (ProfileInterpreter && profile_method != NULL) {
      assert(profile_method_continue != NULL, "should be non-null");

      // Test to see if we should create a method data oop
      // Reuse R1_tmp as we don't need backedge counters anymore.
      Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ldr_s32(R1_tmp, profile_limit);
      __ cmp_32(Ricnt, R1_tmp);
      __ b(*profile_method_continue, lt);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(R1_tmp, *profile_method);
    }

    Address invoke_limit(Rcounters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ldr_s32(R1_tmp, invoke_limit);
    __ cmp_32(Ricnt, R1_tmp);
    __ b(*overflow, hs);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ mov(R1, (int)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);

  // jump to the interpreted entry.
  __ b(do_continue);
}

void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
  // Check if we've got enough room on the stack for
  //  - overhead;
  //  - locals;
  //  - expression stack.
  //
  // Registers on entry:
  //
  // R3 = number of additional locals
  // R11 = max expression stack slots (AArch64 only)
  // Rthread
  // Rmethod
  // Registers used: R0, R1, R2, Rtemp.

  const Register Radditional_locals = R3;
  const Register RmaxStack = AARCH64_ONLY(R11) NOT_AARCH64(R2);

  // monitor entry size
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved registers, thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size;

  // Pages reserved for VM runtime calls and subsequent Java calls.
  const int reserved_pages = JavaThread::stack_shadow_zone_size();

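  // The code below computes Rtemp = SP - (overhead + reserved + guard pages
  // + extra stack words + locals + expression stack) and compares it against
  // the stack limit R0 = stack_base - stack_size; an unsigned Rtemp <= R0
  // means the frame would overflow the stack.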
  // Thread::stack_size() includes guard pages, and they should not be touched.
  const int guard_pages = JavaThread::stack_guard_zone_size();

  __ ldr(R0, Address(Rthread, Thread::stack_base_offset()));
  __ ldr(R1, Address(Rthread, Thread::stack_size_offset()));
#ifndef AARCH64
  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldrh(RmaxStack, Address(Rtemp, ConstMethod::max_stack_offset()));
#endif // !AARCH64
  __ sub_slow(Rtemp, SP, overhead_size + reserved_pages + guard_pages + Method::extra_stack_words());

  // reserve space for additional locals
  __ sub(Rtemp, Rtemp, AsmOperand(Radditional_locals, lsl, Interpreter::logStackElementSize));

  // stack size
  __ sub(R0, R0, R1);

  // reserve space for expression stack
  __ sub(Rtemp, Rtemp, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));

  __ cmp(Rtemp, R0);

#ifdef AARCH64
  Label L;
  __ b(L, hi);
  __ mov(SP, Rsender_sp);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry());
  __ bind(L);
#else
  __ mov(SP, Rsender_sp, ls);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry(), ls);
#endif // AARCH64
}


// Allocate monitor and lock method (asm interpreter)
//
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method

  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  assert ((entry_size % StackAlignmentInBytes) == 0, "should keep stack alignment");

#ifdef ASSERT
  { Label L;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ tbnz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  { Label done;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
#ifdef AARCH64
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, done);
#else
    __ tst(Rtemp, JVM_ACC_STATIC);
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0)), eq); // get receiver (assume this is frequent case)
    __ b(done, eq);
#endif // AARCH64
    __ load_mirror(R0, Rmethod, Rtemp);
    __ bind(done);
  }

  // add space for monitor & lock

#ifdef AARCH64
  __ check_extended_sp(Rtemp);
  __ sub(SP, SP, entry_size);                  // adjust extended SP
  __ mov(Rtemp, SP);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
#endif // AARCH64

  __ sub(Rstack_top, Rstack_top, entry_size);
  __ check_stack_top_on_expansion();
                                               // add space for a monitor entry
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                               // set new monitor block top
  __ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset_in_bytes()));
                                               // store object
  __ mov(R1, Rstack_top);                      // monitor entry address
  __ lock_object(R1);
}

#ifdef AARCH64

//
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
// and for native methods hence the shared code.
//
// On entry:
//   R10 = ConstMethod
//   R11 = max expr. stack (in slots), if !native_call
//
// On exit:
//   Rbcp, Rstack_top are initialized, SP is extended
//
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Incoming registers
  const Register RconstMethod = R10;
  const Register RmaxStack    = R11;
  // Temporary registers
  const Register RextendedSP  = R0;
  const Register Rcache       = R1;
  const Register Rmdp         = ProfileInterpreter ? R2 : ZR;

  // Generates the following stack layout (stack grows up in this picture):
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ mirror             ]
  // [ Method*            ]
  // [ extended SP        ]
  // [ expr. stack top    ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ stp(FP, LR, Address(SP, -2*wordSize, pre_indexed));
  __ mov(FP, SP);                                     // establish new FP

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, ZR);                                 // bcp = 0 for native calls
  } else {
    __ add(Rbcp, RconstMethod, in_bytes(ConstMethod::codes_offset())); // get codebase
  }

  // Rstack_top & RextendedSP
  __ sub(Rstack_top, SP, 10*wordSize);
  if (native_call) {
    __ sub(RextendedSP, Rstack_top, round_to(wordSize, StackAlignmentInBytes)); // reserve 1 slot for exception handling
  } else {
    __ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));
    __ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes);
  }
  __ mov(SP, RextendedSP);
  __ check_stack_top();

  // Load Rmdp
  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()));
    __ csel(Rmdp, ZR, Rtemp, eq);
  }

  // Load Rcache
  __ ldr(Rtemp, Address(RconstMethod, ConstMethod::constants_offset()));
  __ ldr(Rcache, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);

  // Build fixed frame
  __ stp(Rstack_top, Rbcp,     Address(FP, -10*wordSize));
  __ stp(Rlocals, Rcache,      Address(FP,  -8*wordSize));
  __ stp(Rmdp, Rtemp,          Address(FP,  -6*wordSize));
  __ stp(Rmethod, RextendedSP, Address(FP,  -4*wordSize));
  __ stp(ZR, Rsender_sp,       Address(FP,  -2*wordSize));
  assert(frame::interpreter_frame_initial_sp_offset == -10, "interpreter frame broken");
  assert(frame::interpreter_frame_stack_top_offset == -2, "stack top broken");
}

#else // AARCH64

//
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
// and for native methods hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Generates the following stack layout:
  //
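  // (stack grows up in this picture, same orientation as the AArch64 diagram above)
  //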
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ Method*            ]
  // [ last_sp            ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ push(LR);                                        // save return address
  __ push(FP);                                        // save FP
  __ mov(FP, SP);                                     // establish new FP

  __ push(Rsender_sp);

  __ mov(R0, 0);
  __ push(R0);                                        // leave last_sp as null

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, 0);                                  // bcp = 0 for native calls
  } else {
    __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); // get ConstMethod*
    __ add(Rbcp, Rtemp, ConstMethod::codes_offset());        // get codebase
  }

  __ push(Rmethod);                                   // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);
  __ push(Rtemp);

  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()), ne);
    __ push(Rtemp);                                   // set the mdp (method data pointer)
  } else {
    __ push(R0);
  }

  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  __ push(Rtemp);                                     // set constant pool cache
  __ push(Rlocals);                                   // set locals pointer
  __ push(Rbcp);                                      // set bcp
  __ push(R0);                                        // reserve word for pointer to expression stack bottom
  __ str(SP, Address(SP, 0));                         // set expression stack bottom
}

#endif // AARCH64

// End of helpers

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized versions but
// the frame layout they create is very similar. The other method entry
// types are really just special purpose entries that combine entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// Rmethod: Method*
// Rthread: thread
// Rsender_sp: sender sp
// Rparams (SP on 32-bit ARM): pointer to method parameters
//
// LR: return address
//
// Stack layout immediately at entry
//
// [ optional padding(*) ] <--- SP (AArch64)
// [ parameter n         ] <--- Rparams (SP on 32-bit ARM)
//   ...
// [ parameter 1         ]
// [ expression stack    ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects.
//
// local variables follow incoming parameters immediately; i.e.
// the return address is saved at the end of the locals.
//
// [ reserved stack (*)  ] <--- SP (AArch64)
// [ expr. stack         ] <--- Rstack_top (SP on 32-bit ARM)
// [ monitor entry       ]
//   ...
// [ monitor entry       ]
// [ expr. stack bottom  ]
// [ saved Rbcp          ]
// [ current Rlocals     ]
// [ cache               ]
// [ mdx                 ]
// [ mirror              ]
// [ Method*             ]
//
// 32-bit ARM:
// [ last_sp             ]
//
// AArch64:
// [ extended SP (*)     ]
// [ stack top (*)       ]
//
// [ sender_sp           ]
// [ saved FP            ] <--- FP
// [ saved LR            ]
// [ optional padding(*) ]
// [ local variable m    ]
//   ...
// [ local variable 1    ]
// [ parameter n         ]
//   ...
// [ parameter 1         ] <--- Rlocals
//
// (*) - AArch64 only
//

address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    // Code: _aload_0, _getfield, _areturn
    // parameter size = 1
    //
    // The code that gets generated by this routine is split into 2 parts:
    //    1. The "intrinsified" code for G1 (or any SATB based GC),
    //    2. The slow path - which is an expansion of the regular method entry.
    //
    // Notes:-
    // * In the G1 code we do not check whether we need to block for
    //   a safepoint. If G1 is enabled then we must execute the specialized
    //   code for Reference.get (except when the Reference object is null)
    //   so that we can log the value in the referent field with an SATB
    //   update buffer.
    //   If the code for the getfield template is modified so that the
    //   G1 pre-barrier code is executed when the current method is
    //   Reference.get() then going through the normal method entry
    //   will be fine.
    // * The G1 code can, however, check the receiver object (the instance
    //   of java.lang.Reference) and jump to the slow path if null. If the
    //   Reference object is null then we obviously cannot fetch the referent
    //   and so we don't need to call the G1 pre-barrier. Thus we can use the
    //   regular method entry code to generate the NPE.
    //
    // This code is based on generate_accessor_entry.
    //
    // Rmethod: Method*
    // Rthread: thread
    // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
    // Rparams: parameters

    address entry = __ pc();
    Label slow_path;
    const Register Rthis = R0;
    const Register Rret_addr = Rtmp_save1;
    assert_different_registers(Rthis, Rret_addr, Rsender_sp);

    const int referent_offset = java_lang_ref_Reference::referent_offset;
    guarantee(referent_offset > 0, "referent offset not initialized");

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ldr(Rthis, Address(Rparams));
    __ cbz(Rthis, slow_path);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
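    // (The loaded referent is then handed to the pre-barrier below as
    //  pre_val, so it is recorded in the SATB buffer even though no store
    //  to the field takes place.)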
    __ load_heap_oop(R0, Address(Rthis, referent_offset));

    // Preserve LR
    __ mov(Rret_addr, LR);

    __ g1_write_barrier_pre(noreg,   // store_addr
                            noreg,   // new_val
                            R0,      // pre_val
                            Rtemp,   // tmp1
                            R1_tmp); // tmp2

    // _areturn
    __ mov(SP, Rsender_sp);
    __ ret(Rret_addr);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the normal entry point
  return NULL;
}

// Not supported
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Incoming registers:
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp
  // Rparams: parameters

  address entry_point = __ pc();

  // Register allocation
  const Register Rsize_of_params = AARCH64_ONLY(R20) NOT_AARCH64(R6);
  const Register Rsig_handler    = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0 /* R4 */);
  const Register Rnative_code    = AARCH64_ONLY(R22) NOT_AARCH64(Rtmp_save1 /* R5 */);
  const Register Rresult_handler = AARCH64_ONLY(Rsig_handler) NOT_AARCH64(R6);

#ifdef AARCH64
  const Register RconstMethod = R10; // also used in generate_fixed_frame (should match)
  const Register Rsaved_result = Rnative_code;
  const FloatRegister Dsaved_result = V8;
#else
  const Register Rsaved_result_lo = Rtmp_save0;  // R4
  const Register Rsaved_result_hi = Rtmp_save1;  // R5
  FloatRegister saved_result_fp;
#endif // AARCH64


#ifdef AARCH64
  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
#else
  __ ldr(Rsize_of_params, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params, Address(Rsize_of_params, ConstMethod::size_of_parameters_offset()));
#endif // AARCH64

  // native calls don't need the stack size check since they have no expression stack
  // and the arguments are already on the stack and we only add a handful of words
  // to the stack

  // compute beginning of parameters (Rlocals)
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(Rsize_of_params, lsl, Interpreter::logStackElementSize));

#ifdef AARCH64
  int extra_stack_reserve = 2*wordSize; // extra space for oop_temp
  if (__ can_post_interpreter_events()) {
    // extra space for saved results
    extra_stack_reserve += 2*wordSize;
  }
  // reserve extra stack space and nullify oop_temp slot
  __ stp(ZR, ZR, Address(SP, -extra_stack_reserve, pre_indexed));
#else
  // reserve stack space for oop_temp
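  // (zero-initialized so that, if GC walks this frame before a real oop is
  //  stored here, the slot does not look like a stale oop; the AArch64 path
  //  above nullifies the slot the same way via stp(ZR, ZR, ...))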
  __ mov(R0, 0);
  __ push(R0);
#endif // AARCH64

  generate_fixed_frame(true); // Note: R9 is now saved in the frame

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbnz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
      __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti/dtrace support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  {
    Label L;
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ cbnz(Rsig_handler, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1, true);
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  {
    Label L;
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ cbnz(Rnative_code, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1);
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // Allocate stack space for arguments

#ifdef AARCH64
  __ sub(Rtemp, SP, Rsize_of_params, ex_uxtw, LogBytesPerWord);
  __ align_reg(SP, Rtemp, StackAlignmentInBytes);

  // Allocate more stack space to accommodate all arguments passed on GP and FP registers:
  // 8 * wordSize for GPRs
  // 8 * wordSize for FPRs
  int reg_arguments = round_to(8*wordSize + 8*wordSize, StackAlignmentInBytes);
#else

  // C functions need aligned stack
  __ bic(SP, SP, StackAlignmentInBytes - 1);
  // Multiply by BytesPerLong instead of BytesPerWord, because calling convention
  // may require empty slots due to long alignment, e.g. func(int, jlong, int, jlong)
  __ sub(SP, SP, AsmOperand(Rsize_of_params, lsl, LogBytesPerLong));

#ifdef __ABI_HARD__
  // Allocate more stack space to accommodate all GP as well as FP registers:
  // 4 * wordSize
  // 8 * BytesPerLong
  int reg_arguments = round_to((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
#else
  // Reserve at least 4 words on the stack for loading
  // of parameters passed on registers (R0-R3).
  // See generate_slow_signature_handler().
  // It is also used for JNIEnv & class additional parameters.
  int reg_arguments = 4 * wordSize;
#endif // __ABI_HARD__
#endif // AARCH64

  __ sub(SP, SP, reg_arguments);


  // Note: signature handler blows R4 (32-bit ARM) or R21 (AArch64) besides all scratch registers.
  // See AbstractInterpreterGenerator::generate_slow_signature_handler().
  __ call(Rsig_handler);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ mov(Rresult_handler, R0);

  // Pass JNIEnv and mirror for static methods
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ add(R0, Rthread, in_bytes(JavaThread::jni_environment_offset()));
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, L);
    __ load_mirror(Rtemp, Rmethod, Rtemp);
    __ add(R1, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ str(Rtemp, Address(R1, 0));
    __ bind(L);
  }

  __ set_last_Java_frame(SP, FP, true, Rtemp);

  // Changing state to _thread_in_native must be the last thing to do
  // before the jump to native code. At this moment stack must be
  // safepoint-safe and completely prepared for stack walking.
#ifdef ASSERT
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
    __ cmp_32(Rtemp, _thread_in_Java);
    __ b(L, eq);
    __ stop("invalid thread state");
    __ bind(L);
  }
#endif

#ifdef AARCH64
  __ mov(Rtemp, _thread_in_native);
  __ add(Rtemp2, Rthread, in_bytes(JavaThread::thread_state_offset()));
  // STLR is used to force all preceding writes to be observed prior to thread state change
  __ stlr_w(Rtemp, Rtemp2);
#else
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::StoreStore, Rtemp);

  __ mov(Rtemp, _thread_in_native);
  __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
#endif // AARCH64

  __ call(Rnative_code);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // Set FPSCR/FPCR to a known state
  if (AlwaysRestoreFPU) {
    __ restore_default_fp_mode();
  }

  // Do safepoint check
  __ mov(Rtemp, _thread_in_native_trans);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  __ membar(MacroAssembler::StoreLoad, Rtemp);

  __ ldr_global_s32(Rtemp, SafepointSynchronize::address_of_state());

  // Protect the return value in the interleaved code: save it to callee-save registers.
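  // (check_special_condition_for_native_trans below may call back into the
  //  VM, which can clobber the native result in R0/R1 and D0; hence the
  //  saves here.)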
#ifdef AARCH64
  __ mov(Rsaved_result, R0);
  __ fmov_d(Dsaved_result, D0);
#else
  __ mov(Rsaved_result_lo, R0);
  __ mov(Rsaved_result_hi, R1);
#ifdef __ABI_HARD__
  // preserve native FP result in a callee-saved register
  saved_result_fp = D8;
  __ fcpyd(saved_result_fp, D0);
#else
  saved_result_fp = fnoreg;
#endif // __ABI_HARD__
#endif // AARCH64

  {
    __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset()));
    __ cmp(Rtemp, SafepointSynchronize::_not_synchronized);
    __ cond_cmp(R3, 0, eq);

#ifdef AARCH64
    Label L;
    __ b(L, eq);
    __ mov(R0, Rthread);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none);
    __ bind(L);
#else
    __ mov(R0, Rthread, ne);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none, ne);
#if R9_IS_SCRATCHED
    __ restore_method();
#endif
#endif // AARCH64
  }

  // Perform Native->Java thread transition
  __ mov(Rtemp, _thread_in_Java);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Zero handles and last_java_sp
  __ reset_last_Java_frame(Rtemp);
  __ ldr(R3, Address(Rthread, JavaThread::active_handles_offset()));
  __ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset_in_bytes()));
  if (CheckJNICalls) {
    __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // Unbox if the result is non-zero object
#ifdef AARCH64
  {
    Label L, Lnull;
    __ mov_slow(Rtemp, AbstractInterpreter::result_handler(T_OBJECT));
    __ cmp(Rresult_handler, Rtemp);
    __ b(L, ne);
    __ cbz(Rsaved_result, Lnull);
    __ ldr(Rsaved_result, Address(Rsaved_result));
    __ bind(Lnull);
    // Store oop on the stack for GC
    __ str(Rsaved_result, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(L);
  }
#else
  __ tst(Rsaved_result_lo, Rresult_handler);
  __ ldr(Rsaved_result_lo, Address(Rsaved_result_lo), ne);

  // Store oop on the stack for GC
  __ cmp(Rresult_handler, 0);
  __ str(Rsaved_result_lo, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize), ne);
#endif // AARCH64

#ifdef AARCH64
  // Restore SP (drop native parameters area), to keep SP in sync with extended_sp in frame
  __ restore_sp_after_call(Rtemp);
  __ check_stack_top();
#endif // AARCH64

  // reguard stack if StackOverflow exception happened while in native.
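  // (while the thread was in native, a stack overflow may have disabled the
  //  yellow guard zone; re-protect it before executing more Java code.)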
  {
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset()));
    __ cmp_32(Rtemp, JavaThread::stack_guard_yellow_reserved_disabled);
#ifdef AARCH64
    Label L;
    __ b(L, ne);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none);
    __ bind(L);
#else
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq);
#if R9_IS_SCRATCHED
    __ restore_method();
#endif
#endif // AARCH64
  }

  // check pending exceptions
  {
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
#ifdef AARCH64
    Label L;
    __ cbz(Rtemp, L);
    __ mov_pc_to(Rexception_pc);
    __ b(StubRoutines::forward_exception_entry());
    __ bind(L);
#else
    __ cmp(Rtemp, 0);
    __ mov(Rexception_pc, PC, ne);
    __ b(StubRoutines::forward_exception_entry(), ne);
#endif // AARCH64
  }

  if (synchronized) {
    // address of first monitor
    __ sub(R1, FP, - (frame::interpreter_frame_monitor_block_bottom_offset - frame::interpreter_frame_monitor_size()) * wordSize);
    __ unlock_object(R1);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
#ifdef AARCH64
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result, noreg, Dsaved_result);
#else
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result_lo, Rsaved_result_hi, saved_result_fp);
#endif // AARCH64

  // Restore the result. Oop result is restored from the stack.
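  // (an oop result was spilled to the oop_temp frame slot above so that GC
  //  could update it; reload it from there rather than from the saved
  //  register.)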
#ifdef AARCH64
  __ mov(R0, Rsaved_result);
  __ fmov_d(D0, Dsaved_result);

  __ blr(Rresult_handler);
#else
  __ cmp(Rresult_handler, 0);
  __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize), ne);
  __ mov(R0, Rsaved_result_lo, eq);
  __ mov(R1, Rsaved_result_hi);

#ifdef __ABI_HARD__
  // reload native FP result
  __ fcpyd(D0, D8);
#endif // __ABI_HARD__

#ifdef ASSERT
  if (VerifyOops) {
    Label L;
    __ cmp(Rresult_handler, 0);
    __ b(L, eq);
    __ verify_oop(R0);
    __ bind(L);
  }
#endif // ASSERT
#endif // AARCH64

  // Restore FP/LR, sender_sp and return
#ifdef AARCH64
  __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
  __ ldp(FP, LR, Address(FP));
  __ mov(SP, Rtemp);
#else
  __ mov(Rtemp, FP);
  __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
  __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
#endif // AARCH64

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
  // Rparams: pointer to the last parameter in the stack

  address entry_point = __ pc();

  const Register RconstMethod = AARCH64_ONLY(R10) NOT_AARCH64(R3);

#ifdef AARCH64
  const Register RmaxStack = R11;
  const Register RlocalsBase = R12;
#endif // AARCH64

  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));

  __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
  __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));

  // setup Rlocals
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize));

  __ sub(R3, R3, R2); // number of additional locals

#ifdef AARCH64
  // setup RmaxStack
  __ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
  __ add(RmaxStack, RmaxStack, MAX2(1, Method::extra_stack_entries())); // reserve slots for exception handler and JSR292 appendix argument
#endif // AARCH64

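  // R3 now holds the number of additional locals and (on AArch64) R11 the
  // max expression stack size, matching the register contract documented in
  // generate_stack_overflow_check().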
  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

#ifdef AARCH64
  // allocate space for locals
  {
    __ sub(RlocalsBase, Rparams, AsmOperand(R3, lsl, Interpreter::logStackElementSize));
    __ align_reg(SP, RlocalsBase, StackAlignmentInBytes);
  }

  // explicitly initialize locals
  {
    Label zero_loop, done;
    __ cbz(R3, done);

    __ tbz(R3, 0, zero_loop);
    __ subs(R3, R3, 1);
    __ str(ZR, Address(RlocalsBase, wordSize, post_indexed));
    __ b(done, eq);

    __ bind(zero_loop);
    __ subs(R3, R3, 2);
    __ stp(ZR, ZR, Address(RlocalsBase, 2*wordSize, post_indexed));
    __ b(zero_loop, ne);

    __ bind(done);
  }

#else
  // allocate space for locals
  // explicitly initialize locals

  // Loop is unrolled 4 times
  Label loop;
  __ mov(R0, 0);
  __ bind(loop);

  // #1
  __ subs(R3, R3, 1);
  __ push(R0, ge);

  // #2
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #3
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #4
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  __ b(loop, gt);
#endif // AARCH64

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  __ restore_dispatch();

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
      __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();

      __ b(profile_method_continue);
    }

    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Rexception_obj: exception

#ifndef AARCH64
  // Clear interpreter_frame_last_sp.
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // !AARCH64

#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
#endif // AARCH64

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();

  // expression stack is undefined here
  // Rexception_obj: exception
  // Rbcp: exception bcp
  __ verify_oop(Rexception_obj);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ mov(R1, Rexception_obj);
  __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), R1);
  // R0: exception handler entry point
  // Rexception_obj: preserved exception oop
  // Rbcp: bcp for exception handler
  __ push_ptr(Rexception_obj); // push exception which is now the only value on the stack
  __ jump(R0);                 // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is removed and
  // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  // the exception and the expression stack is empty. Thus, for any VM calls
  // at this point, GC will find a legal oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // Rbcp: exception bcp

  //
  // JVMTI PopFrame support
  //
  Interpreter::_remove_activation_preserving_args_entry = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp); // restore SP to extended SP
#endif // AARCH64

  __ empty_expression_stack();

  // Set the popframe_processing bit in _popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.

  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
  __ orr(Rtemp, Rtemp, (unsigned)JavaThread::popframe_processing_bit);
  __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ldr(R0, Address(FP, frame::return_addr_offset * wordSize));
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), R0);
    __ cbnz_32(R0, caller_not_deoptimized);
#ifdef AARCH64
    __ NOT_TESTED();
#endif

    // Compute size of arguments for saving when returning to deoptimized caller
    __ restore_method();
    __ ldr(R0, Address(Rmethod, Method::const_offset()));
    __ ldrh(R0, Address(R0, ConstMethod::size_of_parameters_offset()));

    __ logical_shift_left(R1, R0, Interpreter::logStackElementSize);
    // Save these arguments
    __ restore_locals();
    __ sub(R2, Rlocals, R1);
    __ add(R2, R2, wordSize);
    __ mov(R0, Rthread);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R0, R1, R2);

    __ remove_activation(vtos, LR,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring these arguments
    __ mov(Rtemp, JavaThread::popframe_force_deopt_reexecution_bit);
    __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret();

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, R4,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

#ifndef AARCH64
  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
#ifndef AARCH64
  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(R1, SP);
  __ ldr(R2, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(SP, FP, true, Rtemp);
  __ mov(R0, Rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), R0, R1, R2);
  __ reset_last_Java_frame(Rtemp);
#endif // !AARCH64

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
  __ restore_stack_top();
#else
  // Restore the last_sp and null it out
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();
  __ restore_method();

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  assert(JavaThread::popframe_inactive == 0, "adjust this code");
  __ str_32(__ zero_register(Rtemp), Address(Rthread, JavaThread::popframe_condition_offset()));

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ ldrb(Rtemp, Address(Rbcp, 0));
    __ cmp(Rtemp, Bytecodes::_invokestatic);
    __ b(L_done, ne);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    // get local0
    __ ldr(R1, Address(Rlocals, 0));
    __ mov(R2, Rmethod);
    __ mov(R3, Rbcp);
    __ call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R1, R2, R3);

    __ cbz(R0, L_done);

    __ str(R0, Address(Rstack_top));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support
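  // Sketch of the INCLUDE_JVMTI block above (illustrative only; argument
  // order mirrors the register moves above):
  //
  //   if (*bcp == Bytecodes::_invokestatic) {
  //     oop member_name = member_name_arg_or_null(local0, method, bcp);
  //     if (member_name != NULL) {
  //       *stack_top = member_name;   // restore the MemberName on TOS
  //     }
  //   }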
  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(R0_tos);
  __ str(R0_tos, Address(Rthread, JavaThread::vm_result_offset()));
  // remove the activation (without throwing IllegalMonitorStateExceptions)
  __ remove_activation(vtos, Rexception_pc, false, true, false);
  // restore exception
  __ get_vm_result(Rexception_obj, Rtemp);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects
  // the following registers set up:
  //
  // Rexception_obj: exception
  // Rexception_pc: return address/pc that threw exception
  // SP: expression stack of caller
  // FP: frame pointer of caller
  __ mov(c_rarg0, Rthread);
  __ mov(c_rarg1, Rexception_pc);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);
  // Note that an "issuing PC" is actually the next PC after the call

  __ jump(R0);   // jump to exception handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp); // restore SP to extended SP
#endif // AARCH64

  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  __ empty_expression_stack();

  __ load_earlyret_value(state);

  // Clear the earlyret state
  __ ldr(Rtemp, Address(Rthread, JavaThread::jvmti_thread_state_offset()));

  assert(JvmtiThreadState::earlyret_inactive == 0, "adjust this code");
  __ str_32(__ zero_register(R2), Address(Rtemp, JvmtiThreadState::earlyret_state_offset()));

  __ remove_activation(state, LR,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */

#ifndef AARCH64
  // According to the interpreter calling conventions, the result is returned
  // in R0/R1, so ftos (S0) and dtos (D0) are moved to R0/R1.
  // This conversion should be done after remove_activation, as it uses
  // push(state) & pop(state) to preserve the return value.
  __ convert_tos_to_retval(state);
#endif // !AARCH64

  __ ret();

  return entry;
} // end of ForceEarlyReturn support
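// A rough sketch of what convert_tos_to_retval amounts to per tos state on
// 32-bit ARM (illustrative, inferred from the comment above rather than
// from the macro assembler implementation):
//
//   ftos: S0 -> R0          (caller reads the float result from R0)
//   dtos: D0 -> R0/R1       (low word in R0, high word in R1)
//   other states: the value is already in R0 (and R1 for ltos), unchanged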
//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;

#ifdef __SOFTFP__
  dep = __ pc();                // fall through
#else
  fep = __ pc(); __ push(ftos); __ b(L);
  dep = __ pc(); __ push(dtos); __ b(L);
#endif // __SOFTFP__

  lep = __ pc(); __ push(ltos); __ b(L);

  if (AARCH64_ONLY(true) NOT_AARCH64(VerifyOops)) {  // can't share atos entry with itos on AArch64 or if VerifyOops
    aep = __ pc(); __ push(atos); __ b(L);
  } else {
    aep = __ pc();              // fall through
  }

#ifdef __SOFTFP__
  fep = __ pc();                // fall through
#endif // __SOFTFP__

  bep = cep = sep =             // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

//------------------------------------------------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ push(state);       // save tosca

  // pass tosca registers as arguments
  __ mov(R2, R0_tos);
#ifdef AARCH64
  __ mov(R3, ZR);
#else
  __ mov(R3, R1_tos_hi);
#endif // AARCH64
  __ mov(R1, LR);       // save return address

  // call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);

  __ mov(LR, R0);       // restore return address
  __ pop(state);        // restore tosca

  // return
  __ ret();

  return entry;
}


void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register Rindex_addr = R2_tmp;
  Label Lcontinue;
  InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);
  InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
  const Register Rcounters_addr = R2_tmp;
  const Register Rindex = R4_tmp;

  // calculate new index for counter:
  //   index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes)
  // where (_index >> log2_number_of_codes) is the previous bytecode
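  // Worked example (assuming log2_number_of_codes == 8): after executing
  // bytecode A and then bytecode B, _index holds (B << 8) | A, i.e. the
  // low bits select the previous bytecode and the high bits the current
  // one, so every (previous, current) pair maps to a distinct counter.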
  __ ldr_literal(Rindex_addr, Lindex);
  __ ldr_s32(Rindex, Address(Rindex_addr));
  __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
  __ str_32(Rindex, Address(Rindex_addr));

  // Rindex (R4) contains the index of the counter

  __ ldr_literal(Rcounters_addr, Lcounters);
  __ ldr_s32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
  __ adds_32(Rtemp, Rtemp, 1);
  __ b(Lcontinue, mi);          // avoid overflow
  __ str_32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));

  __ b(Lcontinue);

  __ bind_literal(Lindex);
  __ bind_literal(Lcounters);

  __ bind(Lcontinue);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  address trace_entry = Interpreter::trace_code(t->tos_in());
  __ call(trace_entry, relocInfo::none);
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label Lcontinue;
  const Register stop_at = R2_tmp;

  __ ldr_global_s32(Rtemp, (address) &BytecodeCounter::_counter_value);
  __ mov_slow(stop_at, StopInterpreterAt);

  // test bytecode counter
  __ cmp(Rtemp, stop_at);
  __ b(Lcontinue, ne);

  __ trace_state("stop_interpreter_at");
  __ breakpoint();

  __ bind(Lcontinue);
}
#endif // !PRODUCT