/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

#ifdef BUILTIN_SIM
#include "../../../../../../simulator/simulator.hpp"
#endif

// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 200 * 1024;

#define __ _masm->

//-----------------------------------------------------------------------------

extern "C" void entry(CodeBuffer*);

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  __ andr(esp, esp, -16);
  __ mov(c_rarg3, esp);
  // rmethod
  // rlocals
  // c_rarg3: first stack arg - wordSize

  // adjust sp
  __ sub(sp, c_rarg3, 18 * wordSize);
  __ str(lr, Address(__ pre(sp, -2 * wordSize)));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rmethod, rlocals, c_rarg3);

  // r0: result handler

  // Stack layout:
  // rsp: return address           <- sp
  //      1 garbage
  //      8 integer args (if static first is unused)
  //      1 float/double identifiers
  //      8 double args
  //        stack args              <- esp
  //        garbage
  //        expression stack bottom
  //        bcp (NULL)
  //        ...

  // Restore LR
  __ ldr(lr, Address(__ post(sp, 2 * wordSize)));

  // Do FP first so we can use c_rarg3 as temp
  __ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = as_FloatRegister(i);

    Label d, done;

    __ tbnz(c_rarg3, i, d);
    __ ldrs(r, Address(sp, (10 + i) * wordSize));
    __ b(done);
    __ bind(d);
    __ ldrd(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler so we don't touch it
  // here. It will be loaded with the JNIEnv* later.
  __ ldr(c_rarg1, Address(sp, 1 * wordSize));
  for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) {
    Register rm = as_Register(i), rn = as_Register(i+1);
    __ ldp(rm, rn, Address(sp, i * wordSize));
  }

  __ add(sp, sp, 18 * wordSize);
  __ ret(lr);

  return entry;
}


//
// Various method entries
//

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  // rmethod: Method*
  // r13: sender sp
  // esp: args

  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.
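
  // (A concrete illustration of the monotonicity concern spelled out just
  // below, as a hypothetical: if the interpreter evaluated Math.sin with a
  // different algorithm than compiled code does, a loop that is
  // OSR-compiled halfway through could observe sin(x) change value for the
  // same x between consecutive iterations.)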

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack:
  //        [ arg ] <-- esp
  //        [ arg ]
  // retaddr in lr

  address entry_point = NULL;
  Register continuation = lr;
  switch (kind) {
  case Interpreter::java_lang_math_abs:
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ fabsd(v0, v0);
    __ mov(sp, r13); // Restore caller's SP
    break;
  case Interpreter::java_lang_math_sqrt:
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ fsqrtd(v0, v0);
    __ mov(sp, r13);
    break;
  case Interpreter::java_lang_math_sin :
  case Interpreter::java_lang_math_cos :
  case Interpreter::java_lang_math_tan :
  case Interpreter::java_lang_math_log :
  case Interpreter::java_lang_math_log10 :
  case Interpreter::java_lang_math_exp :
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ mov(sp, r13);
    __ mov(r19, lr);
    continuation = r19;  // The first callee-saved register
    generate_transcendental_entry(kind, 1);
    break;
  case Interpreter::java_lang_math_pow :
    entry_point = __ pc();
    __ mov(r19, lr);
    continuation = r19;
    __ ldrd(v0, Address(esp, 2 * Interpreter::stackElementSize));
    __ ldrd(v1, Address(esp));
    __ mov(sp, r13);
    generate_transcendental_entry(kind, 2);
    break;
  default:
    ;
  }
  if (entry_point) {
    __ br(continuation);
  }

  return entry_point;
}

// double trigonometrics and transcendentals
//   static jdouble dsin(jdouble x);
//   static jdouble dcos(jdouble x);
//   static jdouble dtan(jdouble x);
//   static jdouble dlog(jdouble x);
//   static jdouble dlog10(jdouble x);
//   static jdouble dexp(jdouble x);
//   static jdouble dpow(jdouble x, jdouble y);

void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
  address fn;
  switch (kind) {
  case Interpreter::java_lang_math_sin :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
    break;
  case Interpreter::java_lang_math_cos :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
    break;
  case Interpreter::java_lang_math_tan :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
    break;
  case Interpreter::java_lang_math_log :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
    break;
  case Interpreter::java_lang_math_log10 :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
    break;
  case Interpreter::java_lang_math_exp :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
    break;
  case Interpreter::java_lang_math_pow :
    fpargs = 2;
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
    break;
  default:
    ShouldNotReachHere();
    fn = NULL;  // unreachable
  }
  const int gpargs = 0, rtype = 3;
  __ mov(rscratch1, fn);
  __ blrt(rscratch1, gpargs, fpargs, rtype);
}

// Abstract method entry
// Attempt to execute abstract method.
// Throw exception.
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  // rmethod: Method*
  // r13: sender SP

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                             InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ldr(rscratch1, Address(rfp,
                       frame::interpreter_frame_monitor_block_top_offset *
                       wordSize));
    __ mov(rscratch2, sp);
    __ cmp(rscratch1, rscratch2); // maximal rsp for current rfp (stack
                                  // grows negative)
    __ br(Assembler::HS, L);      // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register r1
  __ movw(c_rarg2, r1);
  __ mov(c_rarg1, (address)name);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(r0, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::
                                    create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // Kind of lame: ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(c_rarg2, Address((address)message));
    } else {
      __ mov(c_rarg2, NULL_WORD);
    }
    __ call_VM(r0,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ b(address(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ dispatch_next(state);
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // Pop N words from the stack
  __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
  __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask);

  __ add(esp, esp, r1, Assembler::LSL, 3);

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);

#ifndef PRODUCT
  // tell the simulator that the method has been reentered
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }
#endif
  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);
  __ get_dispatch();

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // Restore expression stack pointer
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.
  if (UseJVMCICompiler) {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // Clear flag.
    __ strb(zr, Address(rthread, JavaThread::pending_monitorenter_offset()));
    // Take lock.
    lock_method();
    __ bind(L);
  }
#endif
  // handle exceptions
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ uxtb(r0, r0);       break;
  case T_CHAR   : __ uxth(r0, r0);       break;
  case T_BYTE   : __ sxtb(r0, r0);       break;
  case T_SHORT  : __ sxth(r0, r0);       break;
  case T_INT    : __ uxtw(r0, r0);       break; // FIXME: We almost certainly don't need this
  case T_LONG   : /* nothing to do */    break;
  case T_VOID   : /* nothing to do */    break;
  case T_FLOAT  : /* nothing to do */    break;
  case T_DOUBLE : /* nothing to do */    break;
  case T_OBJECT :
    // retrieve result from frame
    __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(r0);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(lr);                            // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ membar(Assembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

// Helpers for commoning out cases in the various type of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO
  // depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
      __ cbz(r0, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                               in_bytes(InvocationCounter::counter_offset()));
      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rmethod, rscratch2, done);
    const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rscratch2,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rmethod, rscratch2, done);

    if (ProfileInterpreter) { // %%% Merge this into MethodData*
      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
      __ addw(r1, r1, 1);
      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ ldrw(r1, invocation_counter);
    __ ldrw(r0, backedge_counter);

    __ addw(r1, r1, InvocationCounter::count_increment);
    __ andw(r0, r0, InvocationCounter::count_mask_value);

    __ strw(r1, invocation_counter);
    __ addw(r0, r0, r1);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::LT, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rscratch2, *profile_method);
    }

    {
      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::HS, *overflow);
    }
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
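  //
  // For reference, the runtime entry described above has roughly this
  // shape (a sketch reconstructed from the comment, not a verbatim
  // declaration):
  //
  //   nmethod* InterpreterRuntime::frequency_counter_overflow(
  //       JavaThread* thread,    // supplied implicitly by call_VM
  //       address branch_bcp);   // NULL/zero here: not at a backedge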
  __ mov(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ b(do_continue);
}

// See if we've got enough room on the stack for locals plus overhead
// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was not
// obvious in generate_method_entry), the guard should work for them too.
//
// Args:
//      r3: number of additional locals this frame needs (what we must check)
//      rmethod: Method*
//
// Kills:
//      r0
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_aarch64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  //
  // Note that we use SUBS rather than CMP here because the immediate
  // field of this instruction may overflow.  SUBS can cope with this
  // because it is a macro that will expand to some number of MOV
  // instructions and a register operation.
  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ br(Assembler::LS, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  // locals + overhead, in bytes
  __ mov(r0, overhead_size);
  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize);  // 2 slots per parameter.

  const Address stack_limit(rthread, JavaThread::stack_overflow_limit_offset());
  __ ldr(rscratch1, stack_limit);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack limit is non-zero.
  __ cbnz(rscratch1, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add stack limit to locals.
  __ add(r0, r0, rscratch1);

  // Check against the current stack bottom.
  __ cmp(sp, r0);
  __ br(Assembler::HI, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller.  This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind.  The ANDR should be
  // unnecessary because the sender SP in r13 is always aligned, but
  // it doesn't hurt.
  __ andr(sp, r13, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rmethod: Method*
//      rlocals: locals
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address monitor_block_top(
        rfp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::NE, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ br(Assembler::EQ, done);
    __ load_mirror(r0, rmethod);

#ifdef ASSERT
    {
      Label L;
      __ cbnz(r0, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ sub(sp, sp, entry_size);      // add space for a monitor entry
  __ sub(esp, esp, entry_size);
  __ mov(rscratch1, esp);
  __ str(rscratch1, monitor_block_top);  // set new monitor block top
  // store object
  __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
  __ mov(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      lr: return address
//      rmethod: Method*
//      rlocals: pointer to locals
//      rcpool: cp cache
//      stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ sub(esp, sp, 14 * wordSize);
    __ mov(rbcp, zr);
    __ stp(esp, zr, Address(__ pre(sp, -14 * wordSize)));
    // add 2 zero-initialized slots for native calls
    __ stp(zr, zr, Address(sp, 12 * wordSize));
  } else {
    __ sub(esp, sp, 12 * wordSize);
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));    // get ConstMethod
    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
    __ stp(esp, rbcp, Address(__ pre(sp, -12 * wordSize)));
  }

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
    __ cbz(rscratch1, method_data_continue);
    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
    __ stp(rscratch1, rmethod, Address(sp, 6 * wordSize)); // save Method* and mdp (method data pointer)
  } else {
    __ stp(zr, rmethod, Address(sp, 6 * wordSize));        // save Method* (no mdp)
  }

  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rscratch1, rmethod);
  __ stp(rscratch1, zr, Address(sp, 4 * wordSize));

  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
  __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));

  __ stp(rfp, lr, Address(sp, 10 * wordSize));
  __ lea(rfp, Address(sp, 10 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ stp(zr, r13, Address(sp, 8 * wordSize));

  // Move SP out of the way
  if (! native_call) {
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
    __ andr(sp, rscratch1, -16);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rmethod: Method*
  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    const Register local_0 = c_rarg0;
    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ldr(local_0, Address(esp, 0));
    __ cbz(local_0, slow_path);


    // Load the value of the referent field.
    const Address field_address(local_0, referent_offset);
    __ load_heap_oop(local_0, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ enter(); // g1_write may call runtime
    __ g1_write_barrier_pre(noreg /* obj */,
                            local_0 /* pre_val */,
                            rthread /* thread */,
                            rscratch2 /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);
    __ leave();
    // areturn
    __ andr(sp, r13, -16);  // done with stack
    __ ret(lr);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path
    // esp: args

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.
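
    // What the fast path below computes is the standard one-byte CRC32
    // step; in C terms, roughly (a sketch -- the table lookup itself is
    // emitted by MacroAssembler::update_byte_crc32):
    //
    //   crc = ~crc;
    //   crc = crc_table[(crc ^ b) & 0xff] ^ (crc >> 8);
    //   crc = ~crc;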

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register val = c_rarg1;  // source java byte value
    const Register tbl = c_rarg2;  // scratch

    // Arguments are reversed on java expression stack
    __ ldrw(val, Address(esp, 0));              // byte value
    __ ldrw(crc, Address(esp, wordSize));       // Initial CRC

    __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
    __ add(tbl, tbl, offset);

    __ ornw(crc, zr, crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ ornw(crc, zr, crc); // ~crc

    // result in c_rarg0

    __ andr(sp, r13, -16);
    __ ret(lr);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register off = len;      // offset (never overlaps with 'len')

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ ldr(buf, Address(esp, 2*wordSize)); // long buf
      __ ldrw(off, Address(esp, wordSize));  // offset
      __ add(buf, buf, off);                 // + offset
      __ ldrw(crc, Address(esp, 4*wordSize)); // Initial CRC
    } else {
      __ ldr(buf, Address(esp, 2*wordSize)); // byte[] array
      __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ ldrw(off, Address(esp, wordSize));  // offset
      __ add(buf, buf, off);                 // + offset
      __ ldrw(crc, Address(esp, 3*wordSize)); // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ ldrw(len, Address(esp, 0x0)); // Length

    __ andr(sp, r13, -16); // Restore the caller's SP

    // We are frameless so we can just jump to the stub.
    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

// Not supported
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  return NULL;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int n_shadow_pages = JavaThread::stack_shadow_zone_size() / os::vm_page_size();
    const int start_page = native_call ? n_shadow_pages : 1;
    const int page_size = os::vm_page_size();
    for (int pages = start_page; pages <= n_shadow_pages ; pages++) {
      __ sub(rscratch2, sp, pages*page_size);
      __ str(zr, Address(rscratch2));
    }
  }
}


// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // r1: Method*
  // rscratch1: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rmethod, Method::const_offset());
  const Address access_flags      (rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r2, ConstMethod::
                                       size_of_parameters_offset());

  // get parameter size (always needed)
  __ ldr(r2, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack.
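
  // A note on the rlocals computation below: the incoming arguments sit
  // on the caller's expression stack with the last argument at the lowest
  // address, so local 0 (the first parameter) lives at
  //   esp + nargs * wordSize - wordSize,
  // which is what the scaled add followed by the -wordSize adjustment
  // produces.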

  // rmethod: Method*
  // r2: size of parameters
  // rscratch1: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ add(rlocals, rlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andr(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);
#ifndef PRODUCT
  // tell the simulator that a method has been entered
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }
#endif

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::NE, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = r17;
  const Register result_handler = r19;

  // allocate space for parameters
  __ ldr(t, Address(rmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
  __ andr(sp, rscratch1, -16);
  __ mov(esp, rscratch1);

  // get signature handler
  {
    Label L;
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ cbnz(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch rmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ blr(t);
  __ get_method(rmethod);        // slow path can do a GC, reload rmethod


  // result handler is in r0
  // set result handler
  __ mov(result_handler, r0);
  // pass mirror handle if static call
  {
    Label L;
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tbz(t, exact_log2(JVM_ACC_STATIC), L);
    // get mirror
    __ load_mirror(t, rmethod);
    // copy mirror into activation frame
    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in r10
  {
    Label L;
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ mov(rscratch2, unsatisfied);
    __ ldr(rscratch2, rscratch2);
    __ cmp(r10, rscratch2);
    __ br(Assembler::NE, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ get_method(rmethod);
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(esp, rfp, (address)NULL, rscratch1);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
    __ cmp(t, _thread_in_Java);
    __ br(Assembler::EQ, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // Call the native method.
  __ blrt(r10, rscratch1);
  __ maybe_isb();
  __ get_method(rmethod);
  // result potentially in r0 or v0

  // make room for the pushes we're about to do
  __ sub(rscratch1, esp, 4 * wordSize);
  __ andr(sp, rscratch1, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ mov(rscratch1, _thread_in_native_trans);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ dsb(Assembler::SY);
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(rthread, rscratch2);
    }
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    {
      unsigned long offset;
      __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
    }
    assert(SafepointSynchronize::_not_synchronized == 0,
           "SafepointSynchronize::_not_synchronized");
    Label L;
    __ cbnz(rscratch2, L);
    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbz(rscratch2, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. So we do a runtime call by
    // hand.
    //
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ blrt(rscratch2, 1, 0, 0);
    __ maybe_isb();
    __ get_method(rmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true, true);

  // reset handle block
  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
  __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmp(t, result_handler);
    __ br(Assembler::NE, no_oop);
    // retrieve result
    __ pop(ltos);
    __ cbz(r0, store_result);
    __ ldr(r0, Address(r0, 0));
    __ bind(store_result);
    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ ldrw(rscratch1, Address(rscratch1));
    __ cmp(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
    __ br(Assembler::NE, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ blrt(rscratch2, 0, 0, 0);
    __ popa(); // XXX only restore smashed registers
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(rmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // rbcp == code_base()
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));    // get ConstMethod*
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset())); // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tbz(t, exact_log2(JVM_ACC_SYNCHRONIZED), L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // monitor expected in c_rarg1 for slow unlock path
      __ lea (c_rarg1, Address(rfp,   // address of first monitor
                               (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                          wordSize - sizeof(BasicObjectLock))));

      __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ cbnz(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in r0:d0, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  __ pop(dtos);

  __ blr(result_handler);

  // remove activation
  __ ldr(esp, Address(rfp,
                    frame::interpreter_frame_sender_sp_offset *
                    wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mov(sp, esp);

  __ ret(lr);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rscratch1: sender sp
  address entry_point = __ pc();

  const Address constMethod(rmethod, Method::const_offset());
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r3,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ldr(r3, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // r2: size of parameters

  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
  __ sub(r3, r3, r2); // r3 = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ sub(rlocals, rlocals, wordSize);

  // Make room for locals
  __ sub(rscratch1, esp, r3, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // r3 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ ands(zr, r3, r3);
    __ br(Assembler::LE, exit); // do nothing if r3 <= 0
    __ bind(loop);
    __ str(zr, Address(__ post(rscratch1, wordSize)));
    __ sub(r3, r3, 1); // until everything initialized
    __ cbnz(r3, loop);
    __ bind(exit);
  }

  // And the base dispatch table
  __ get_dispatch();

  // initialize fixed part of activation frame
  generate_fixed_frame(false);
#ifndef PRODUCT
  // tell the simulator that a method has been entered
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }
#endif
  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      // don't think we need this
      __ get_method(r1);
      __ b(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // r0: exception
  // r3: return address/pc that threw exception
  __ restore_bcp();    // rbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ reinit_heapbase();  // restore rheapbase as heapbase.
  __ get_dispatch();

#ifndef PRODUCT
  // tell the simulator that the caller method has been reentered
  if (NotifySimulator) {
    __ get_method(rmethod);
    __ notify(Assembler::method_reentry);
  }
#endif
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, rmethod may be corrupt.
  __ get_method(rmethod);
  // expression stack is undefined here
  // r0: exception
  // rbcp: exception bcp
  __ verify_oop(r0);
  __ mov(c_rarg1, r0);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(r3,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // r0: exception handler entry point
  // r3: preserved exception oop
  // rbcp: bcp for exception handler
  __ push_ptr(r3); // push exception which is now the only value on the stack
  __ br(r0); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci still refers to the instruction which
  // caused the exception and the expression stack is empty. Thus, for
  // any VM calls at this point, GC will find a legal oop map (with
  // empty expression stack).

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
  __ orr(r3, r3, JavaThread::popframe_processing_bit);
  __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::interpreter_contains), c_rarg1);
    __ cbnz(r0, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(r0);
    __ ldr(r0, Address(r0, Method::const_offset()));
    __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
                                       size_of_parameters_offset())));
    __ lsl(r0, r0, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
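    // rlocals points at local slot 0, the highest-addressed parameter
    // word (locals grow towards lower addresses), so rewinding by the
    // parameter size in bytes (r0) and re-adding one word yields the
    // lowest-addressed parameter word: the base of the region that
    // popframe_preserve_args copies aside.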
    __ sub(rlocals, rlocals, r0);
    __ add(rlocals, rlocals, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          rthread, r0, rlocals);

    __ remove_activation(vtos,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
    __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret(lr);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Restore the last_sp and null it out
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ ldrb(rscratch1, Address(rbcp, 0));
    __ cmpw(rscratch1, Bytecodes::_invokestatic);
    __ br(Assembler::NE, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call.  Detect such a case in the
    // InterpreterRuntime function and return the member name
    // argument, or NULL.
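    // The runtime call below returns the MemberName argument when one
    // must be re-materialized, or NULL when nothing needs restoring; a
    // non-NULL result is stored back on top of the expression stack
    // before dispatch resumes.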

    __ ldr(c_rarg0, Address(rlocals, 0));
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);

    __ cbz(r0, L_done);

    __ str(r0, Address(esp, 0));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(r0);
  __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, false, true, false);
  // restore exception
  __ get_vm_result(r0, rthread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // r0: exception
  // lr: return address/pc that threw exception
  // rsp: expression stack of caller
  // rfp: fp of caller
  // FIXME: There's no point saving LR here because VM calls don't trash it
  __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                        SharedRuntime::exception_handler_for_return_address),
                        rthread, lr);
  __ mov(r1, r0);  // save exception handler
  __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
  // We might be returning to a deopt handler that expects r3 to
  // contain the exception pc
  __ mov(r3, lr);
  // Note that an "issuing PC" is actually the next PC after the call
  __ br(r1);  // jump to exception handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
  __ str(zr, cond_addr);

  __ remove_activation(state,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ ret(lr);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() &&
         t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(lr);
  __ push(state);
  __ push(RegSet::range(r0, r15), sp);
  __ mov(c_rarg2, r0);  // Pass itos
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(RegSet::range(r0, r15), sp);
  __ pop(state);
  __ pop(lr);
  __ ret(lr);  // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  Register rscratch3 = r0;
  __ push(rscratch1);
  __ push(rscratch2);
  __ push(rscratch3);
  __ mov(rscratch3, (address) &BytecodeCounter::_counter_value);
  __ atomic_add(noreg, 1, rscratch3);
  __ pop(rscratch3);
  __ pop(rscratch2);
  __ pop(rscratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
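  // The stub (generate_trace_code above) preserves lr, the tos value
  // and r0-r15 across the call into InterpreterRuntime::trace_bytecode,
  // so tracing leaves the interpreter state undisturbed.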

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ bl(Interpreter::trace_code(t->tos_in()));
  __ reinit_heapbase();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ push(rscratch1);
  __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
  __ ldr(rscratch1, Address(rscratch1));
  __ mov(rscratch2, StopInterpreterAt);
  __ cmpw(rscratch1, rscratch2);
  __ br(Assembler::NE, L);
  __ brk(0);
  __ bind(L);
  __ pop(rscratch1);
}

#ifdef BUILTIN_SIM

#include <errno.h>   // errno is tested after msync below
#include <sys/mman.h>
#include <unistd.h>

extern "C" {
  static int PAGESIZE = getpagesize();
  int is_mapped_address(u_int64_t address)
  {
    address = (address & ~((u_int64_t)PAGESIZE - 1));
    if (msync((void *)address, PAGESIZE, MS_ASYNC) == 0) {
      return true;
    }
    if (errno != ENOMEM) {
      return true;
    }
    return false;
  }

  void bccheck1(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
  {
    if (method != 0) {
      method[0] = '\0';
    }
    if (bcidx != 0) {
      *bcidx = -2;
    }
    if (decode != 0) {
      decode[0] = 0;
    }

    if (framesize != 0) {
      *framesize = -1;
    }

    if (Interpreter::contains((address)pc)) {
      AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
      Method* meth;
      address bcp;
      if (fp) {
#define FRAME_SLOT_METHOD 3
#define FRAME_SLOT_BCP 7
        meth = (Method*)sim->getMemory()->loadU64(fp - (FRAME_SLOT_METHOD << 3));
        bcp = (address)sim->getMemory()->loadU64(fp - (FRAME_SLOT_BCP << 3));
#undef FRAME_SLOT_METHOD
#undef FRAME_SLOT_BCP
      } else {
        meth = (Method*)sim->getCPUState().xreg(RMETHOD, 0);
        bcp = (address)sim->getCPUState().xreg(RBCP, 0);
      }
      if (meth->is_native()) {
        return;
      }
      if (method && meth->is_method()) {
        ResourceMark rm;
        method[0] = 'I';
        method[1] = ' ';
        meth->name_and_sig_as_C_string(method + 2, 398);
      }
      if (bcidx) {
        if (meth->contains(bcp)) {
          *bcidx = meth->bci_from(bcp);
        } else {
          *bcidx = -2;
        }
      }
      if (decode) {
        if (!BytecodeTracer::closure()) {
          BytecodeTracer::set_closure(BytecodeTracer::std_closure());
        }
        stringStream str(decode, 400);
        BytecodeTracer::trace(meth, bcp, &str);
      }
    } else {
      if (method) {
        CodeBlob *cb = CodeCache::find_blob((address)pc);
        if (cb != NULL) {
          if (cb->is_nmethod()) {
            ResourceMark rm;
            nmethod* nm = (nmethod*)cb;
            method[0] = 'C';
            method[1] = ' ';
            nm->method()->name_and_sig_as_C_string(method + 2, 398);
          } else if (cb->is_adapter_blob()) {
            strcpy(method, "B adapter blob");
          } else if (cb->is_runtime_stub()) {
            strcpy(method, "B runtime stub");
          } else if (cb->is_exception_stub()) {
            strcpy(method, "B exception stub");
          } else if (cb->is_deoptimization_stub()) {
            strcpy(method, "B deoptimization stub");
          } else if (cb->is_safepoint_stub()) {
            strcpy(method, "B safepoint stub");
          } else if (cb->is_uncommon_trap_stub()) {
            strcpy(method, "B uncommon trap stub");
          } else if (cb->contains((address)StubRoutines::call_stub())) {
            strcpy(method, "B call stub");
          } else {
            strcpy(method, "B unknown blob : ");
            strcat(method, cb->name());
          }
          if (framesize != NULL) {
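            // also report the blob's fixed frame size, which the
            // simulator can use when walking compiled frames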
            *framesize = cb->frame_size();
          }
        }
      }
    }
  }


  JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
  {
    bccheck1(pc, fp, method, bcidx, framesize, decode);
  }
}

#endif // BUILTIN_SIM
#endif // !PRODUCT