/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

#define __ _masm->

#ifndef CC_INTERP

//-----------------------------------------------------------------------------

extern "C" void entry(CodeBuffer*);

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ldr(rscratch1, Address(rfp,
                              frame::interpreter_frame_monitor_block_top_offset *
                              wordSize));
    __ mov(rscratch2, sp);
    __ cmp(rscratch1, rscratch2); // maximal sp for current rfp (stack
                                  // grows downward)
    __ br(Assembler::HS, L);      // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}
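// Note: the expression stack is emptied (rather than just ignored) because
// any VM call made while an exception is pending must see a legal oop map
// for this frame, and an empty expression stack is trivially legal. The
// same reasoning appears in the note in generate_throw_exception() below.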
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register r1
  __ movw(c_rarg2, r1);
  __ mov(c_rarg1, (address)name);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(r0, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::
                                    create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // Somewhat lame: ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(c_rarg2, Address((address)message));
    } else {
      __ mov(c_rarg2, NULL_WORD);
    }
    __ call_VM(r0,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ b(address(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ dispatch_next(state);
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // Pop N words from the stack
  __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
  __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask);

  __ add(esp, esp, r1, Assembler::LSL, 3);

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);

  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}
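// For reference, the "Restore machine SP" sequence above recomputes the
// frame-local SP limit from scratch. In effect (illustrative pseudo-code,
// not the generated instructions):
//
//   sp = align_down(initial_sp
//                   - 8 * (max_stack + interpreter_frame_monitor_size() + 2),
//                   16);
//
// i.e. SP is pulled down far enough below the expression stack bottom to
// hold the maximal expression stack plus the monitor area, then 16-byte
// aligned as AArch64 requires.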
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // handle exceptions
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  __ get_dispatch();

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
         + (EnableInvokeDynamic ? 2 : 0));
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // Restore expression stack pointer
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ dispatch_next(state, step);
  return entry;
}


int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(r0); break;
  case T_CHAR   : __ uxth(r0, r0); break;
  case T_BYTE   : __ sxtb(r0, r0); break;
  case T_SHORT  : __ sxth(r0, r0); break;
  case T_INT    : __ uxtw(r0, r0); break; // FIXME: We almost certainly don't need this
  case T_LONG   : /* nothing to do */ break;
  case T_VOID   : /* nothing to do */ break;
  case T_FLOAT  : /* nothing to do */ break;
  case T_DOUBLE : /* nothing to do */ break;
  case T_OBJECT :
    // retrieve result from frame
    __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(r0);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(lr); // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ membar(Assembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}
// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rmethod: method
//
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO
  // depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
      __ cbz(r0, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                               in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rmethod, rscratch2, done);
    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
    __ bind(done);
  } else {
    const Address backedge_counter(rscratch2,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rmethod, rscratch2, done);

    if (ProfileInterpreter) { // %%% Merge this into MethodData*
      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
      __ addw(r1, r1, 1);
      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ ldrw(r1, invocation_counter);
    __ ldrw(r0, backedge_counter);

    __ addw(r1, r1, InvocationCounter::count_increment);
    __ andw(r0, r0, InvocationCounter::count_mask_value);

    __ strw(r1, invocation_counter);
    __ addw(r0, r0, r1); // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL implies !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      unsigned long offset;
      __ adrp(rscratch2, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit),
              offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
      __ cmp(r0, rscratch2);
      __ br(Assembler::LT, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rscratch2, *profile_method);
    }

    {
      unsigned long offset;
      __ adrp(rscratch2,
              ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit),
              offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::HS, *overflow);
    }
    __ bind(done);
  }
}
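// For reference, increment_mask_and_jump above implements the tiered
// notification test. Roughly (illustrative pseudo-code, not the generated
// instructions):
//
//   counter += count_increment;
//   store(counter);
//   if ((counter & mask) == 0)   // the Assembler::EQ path
//     goto *overflow;            // every 2^Tier0InvokeNotifyFreqLog calls
//
// Because the counter itself is never reset here, the test fires
// periodically as the masked bits wrap around to zero.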
void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ mov(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ b(*do_continue);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (this wasn't
// obvious in generate_method_entry), so the guard should work for them
// too.
//
// Args:
//      r3: number of additional locals this frame needs (what we must check)
//      rmethod: Method*
//
// Kills:
//      r0
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_aarch64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rfp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  //
  // Note that we use SUBS rather than CMP here because the immediate
  // field of this instruction may overflow.  SUBS can cope with this
  // because it is a macro that will expand to some number of MOV
  // instructions and a register operation.
  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ br(Assembler::LS, after_frame_check);

  // compute sp as if this were going to be the last frame on
  // the stack before the red zone

  const Address stack_base(rthread, Thread::stack_base_offset());
  const Address stack_size(rthread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ mov(r0, overhead_size);
  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize); // 2 slots per parameter.

  __ ldr(rscratch1, stack_base);
  __ ldr(rscratch2, stack_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cbnz(rscratch1, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cbnz(rscratch2, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
  __ add(r0, r0, rscratch1);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                        (StackRedPages+StackYellowPages);

  // add in the red and yellow zone sizes
  __ add(r0, r0, max_pages * page_size * 2);

  // check against the current stack bottom
  __ cmp(sp, r0);
  __ br(Assembler::HI, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller.  This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind.  The ANDR should be
  // unnecessary because the sender SP in r13 is always aligned, but
  // it doesn't hurt.
  __ andr(sp, r13, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}
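// For reference, the check above boils down to (illustrative pseudo-code,
// not the generated instructions):
//
//   // fast path: frame fits within one guard page's slack
//   if (n_extra_locals <= (page_size - overhead_size) / stackElementSize)
//     goto after_frame_check;
//
//   // slow path: compare the fully-extended frame against the thread stack
//   if (sp > (stack_base - stack_size)            // thread stack limit
//            + overhead_size + 8 * n_extra_locals // this frame's needs
//            + max_pages * page_size * 2)         // red/yellow/shadow slack
//     goto after_frame_check;                     // enough room, carry on
//   else
//     far_jump(throw_StackOverflowError_entry);   // would overflow: throw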
// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rmethod: Method*
//      rlocals: locals
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address monitor_block_top(
        rfp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::NE, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label done;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ br(Assembler::EQ, done);
    __ ldr(r0, Address(rmethod, Method::const_offset()));
    __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
    __ ldr(r0, Address(r0,
                       ConstantPool::pool_holder_offset_in_bytes()));
    __ ldr(r0, Address(r0, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ cbnz(r0, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ sub(sp, sp, entry_size);           // add space for a monitor entry
  __ sub(esp, esp, entry_size);
  __ mov(rscratch1, esp);
  __ str(rscratch1, monitor_block_top); // set new monitor block top
  // store object
  __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
  __ mov(c_rarg1, esp);                 // object address
  __ lock_object(c_rarg1);
}
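// For reference, after lock_method() the top of the stack holds one
// freshly carved BasicObjectLock. Roughly (illustrative picture, offsets
// per the code above):
//
//   [ ...                 ]
//   [ BasicObjectLock     ] <--- esp == monitor_block_top
//       obj:  sync object (receiver, or class mirror for static methods)
//       lock: displaced header slot, filled in by lock_object()
//
// Both sp and esp are dropped by entry_size, so the monitor sits between
// the frame's fixed part and the (currently empty) expression stack.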
// Generate a fixed interpreter frame.  The setup is identical for
// interpreted methods and for native methods, hence the shared code.
//
// Args:
//      lr: return address
//      rmethod: Method*
//      rlocals: pointer to locals
//      rcpool: cp cache
//      stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ sub(esp, sp, 12 * wordSize);
    __ mov(rbcp, zr);
    __ stp(esp, zr, Address(__ pre(sp, -12 * wordSize)));
    // add 2 zero-initialized slots for native calls
    __ stp(zr, zr, Address(sp, 10 * wordSize));
  } else {
    __ sub(esp, sp, 10 * wordSize);
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));    // get ConstMethod
    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
    __ stp(esp, rbcp, Address(__ pre(sp, -10 * wordSize)));
  }

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
    __ cbz(rscratch1, method_data_continue);
    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
    __ stp(rscratch1, rmethod, Address(sp, 4 * wordSize)); // save Method* and mdp (method data pointer)
  } else {
    __ stp(zr, rmethod, Address(sp, 4 * wordSize));        // save Method* (no mdp)
  }

  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
  __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));

  __ stp(rfp, lr, Address(sp, 8 * wordSize));
  __ lea(rfp, Address(sp, 8 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ stp(zr, r13, Address(sp, 6 * wordSize));

  // Move SP out of the way
  if (! native_call) {
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
           + (EnableInvokeDynamic ? 2 : 0));
    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
    __ andr(sp, rscratch1, -16);
  }
}
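// For reference, the stores above lay out the fixed frame as follows
// (word offsets from the pre-indexed sp; illustrative map derived from the
// stp sequence rather than quoted from frame_aarch64.hpp):
//
//   sp+ 0: expression stack bottom (saved esp)
//   sp+ 1: rbcp (zr for native frames)
//   sp+ 2: rlocals
//   sp+ 3: constant pool cache
//   sp+ 4: mdp (zr if not profiling)
//   sp+ 5: Method*
//   sp+ 6: last_sp (null until a java call)
//   sp+ 7: sender sp (r13)
//   sp+ 8: saved rfp          <--- new rfp points here
//   sp+ 9: return address (lr)
//   sp+10..11: two zeroed slots, native frames only (oop temp area)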
// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved, otherwise drop
// into vanilla (slow path) entry)
address InterpreterGenerator::generate_accessor_entry(void) {
  return NULL;
}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rmethod: Method*
  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) {
    Label slow_path;
    const Register local_0 = c_rarg0;
    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ldr(local_0, Address(esp, 0));
    __ mov(r19, r13); // First call-saved register
    __ cbz(local_0, slow_path);

    // Load the value of the referent field.
    const Address field_address(local_0, referent_offset);
    __ load_heap_oop(local_0, field_address);

    __ mov(r19, r13); // Move senderSP to a callee-saved register
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ enter(); // g1_write may call runtime
    __ g1_write_barrier_pre(noreg /* obj */,
                            local_0 /* pre_val */,
                            rthread /* thread */,
                            rscratch2 /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);
    __ leave();
    // areturn
    __ andr(sp, r19, -16); // done with stack
    __ ret(lr);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path
    // esp: args

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0; // crc
    const Register val = c_rarg1; // source java byte value
    const Register tbl = c_rarg2; // scratch

    // Arguments are reversed on java expression stack
    __ ldrw(val, Address(esp, 0));        // byte value
    __ ldrw(crc, Address(esp, wordSize)); // Initial CRC

    __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
    __ add(tbl, tbl, offset);

    __ ornw(crc, zr, crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ ornw(crc, zr, crc); // ~crc

    // result in c_rarg0

    __ andr(sp, r13, -16);
    __ ret(lr);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0; // crc
    const Register buf = c_rarg1; // source java byte array address
    const Register len = c_rarg2; // length
    const Register off = len;     // offset (never overlaps with 'len')

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ ldr(buf, Address(esp, 2*wordSize));  // long buf
      __ ldrw(off, Address(esp, wordSize));   // offset
      __ add(buf, buf, off);                  // + offset
      __ ldrw(crc, Address(esp, 4*wordSize)); // Initial CRC
    } else {
      __ ldr(buf, Address(esp, 2*wordSize));  // byte[] array
      __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ ldrw(off, Address(esp, wordSize));   // offset
      __ add(buf, buf, off);                  // + offset
      __ ldrw(crc, Address(esp, 3*wordSize)); // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ ldrw(len, Address(esp, 0x0)); // Length

    __ andr(sp, r13, -16); // Restore the caller's SP

    // We are frameless so we can just jump to the stub.
    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}
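// For reference, the esp offsets used above follow from the Java
// expression-stack layout at entry: arguments are pushed left-to-right,
// one word per slot (two slots for a long), so the last argument is at
// esp+0. Illustrative slot map (derived from the loads above):
//
//   updateBytes(crc, byte[] b, off, len)   updateByteBuffer(crc, long buf, off, len)
//     esp+0: len                             esp+0: len
//     esp+1: off                             esp+1: off
//     esp+2: b (arrayOop)                    esp+2..3: buf (long, two slots)
//     esp+3: crc                             esp+4: crc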
void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int start_page = native_call ? StackShadowPages : 1;
    const int page_size = os::vm_page_size();
    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
      __ sub(rscratch2, sp, pages*page_size);
      __ str(zr, Address(rscratch2));
    }
  }
}


// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // r1: Method*
  // rscratch1: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rmethod, Method::const_offset());
  const Address access_flags      (rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r2, ConstMethod::
                                       size_of_parameters_offset());

  // get parameter size (always needed)
  __ ldr(r2, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rmethod: Method*
  // r2: size of parameters
  // rscratch1: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ add(rlocals, rlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andr(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::NE, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = r17;
  const Register result_handler = r19;

  // allocate space for parameters
  __ ldr(t, Address(rmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
  __ andr(sp, rscratch1, -16);
  __ mov(esp, rscratch1);

  // get signature handler
  {
    Label L;
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ cbnz(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch rmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ blr(t);
  __ get_method(rmethod); // slow path can do a GC, reload rmethod
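  // For reference: the signature handler just invoked marshals the Java
  // arguments from the locals area (rlocals) into the native ABI locations
  // (integer args into c_rarg*, floating-point args into v-registers, any
  // overflow onto the machine stack at sp), as implied by the
  // SignatureHandlerGenerator asserts above. This summary is illustrative;
  // see interpreterRT_aarch64.cpp for the actual generator.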

  // result handler is in r0
  // set result handler
  __ mov(result_handler, r0);
  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tst(t, JVM_ACC_STATIC);
    __ br(Assembler::EQ, L);
    // get mirror
    __ ldr(t, Address(rmethod, Method::const_offset()));
    __ ldr(t, Address(t, ConstMethod::constants_offset()));
    __ ldr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ ldr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in r10
  {
    Label L;
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ mov(rscratch2, unsatisfied);
    __ ldr(rscratch2, rscratch2);
    __ cmp(r10, rscratch2);
    __ br(Assembler::NE, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ get_method(rmethod);
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));

  // Set the last Java PC in the frame anchor to be the return address from
  // the call to the native method: this will allow the debugger to
  // generate an accurate stack trace.
  Label native_return;
  __ set_last_Java_frame(esp, rfp, native_return, rscratch1);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
    __ cmp(t, _thread_in_Java);
    __ br(Assembler::EQ, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // Call the native method.
  __ blr(r10);
  __ bind(native_return);
  __ maybe_isb();
  __ get_method(rmethod);
  // result potentially in r0 or v0

  // make room for the pushes we're about to do
  __ sub(rscratch1, esp, 4 * wordSize);
  __ andr(sp, rscratch1, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ mov(rscratch1, _thread_in_native_trans);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ dsb(Assembler::SY);
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(rthread, rscratch2);
    }
  }
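  // For reference, the transition protocol around the native call is
  // (illustrative summary, not the generated instructions):
  //
  //   state = _thread_in_native_trans;              // store-release
  //   <make the store visible to the VM thread>     // dsb or serialization page
  //   if (safepoint_state != _not_synchronized || suspend_flags != 0)
  //     check_special_condition_for_native_trans(thread); // may block
  //   state = _thread_in_Java;
  //
  // The serialization-page write lets the VM thread force a memory barrier
  // remotely (by protecting the page) instead of every thread issuing a
  // full fence on each native call return.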

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    {
      unsigned long offset;
      __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
    }
    assert(SafepointSynchronize::_not_synchronized == 0,
           "SafepointSynchronize::_not_synchronized");
    Label L;
    __ cbnz(rscratch2, L);
    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbz(rscratch2, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ blr(rscratch2);
    __ maybe_isb();
    __ get_method(rmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  // reset handle block
  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
  __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, not_weak, store_result;
    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmp(t, result_handler);
    __ br(Assembler::NE, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.
    __ pop(ltos);
    __ cbz(r0, store_result); // Use NULL as-is.
    STATIC_ASSERT(JNIHandles::weak_tag_mask == 1u);
    __ tbz(r0, 0, not_weak);  // Test for jweak tag.
    // Resolve jweak.
    __ ldr(r0, Address(r0, -JNIHandles::weak_tag_value));
#if INCLUDE_ALL_GCS
    if (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) {
      __ enter(); // Barrier may call runtime.
      __ g1_write_barrier_pre(noreg /* obj */,
                              r0 /* pre_val */,
                              rthread /* thread */,
                              t /* tmp */,
                              true /* tosca_live */,
                              true /* expand_call */);
      __ leave();
    }
#endif // INCLUDE_ALL_GCS
    __ b(store_result);
    __ bind(not_weak);
    // Resolve (untagged) jobject.
    __ ldr(r0, Address(r0, 0));
    __ bind(store_result);
    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }
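  // For reference: a JNI handle returned by the native method is a tagged
  // pointer into a handle block. Roughly (illustrative pseudo-code, per
  // the code above):
  //
  //   oop resolve(jobject h) {
  //     if (h == NULL) return NULL;             // NULL passes through
  //     if (((uintptr_t)h) & 1) {               // weak_tag_mask == 1
  //       oop v = *(oop*)((uintptr_t)h - 1);    // jweak: strip tag, load
  //       satb_log(v);                          // keep referent alive (SATB GCs)
  //       return v;
  //     }
  //     return *(oop*)h;                        // plain jobject: just load
  //   }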

  {
    Label no_reguard;
    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ ldrb(rscratch1, Address(rscratch1));
    __ cmp(rscratch1, JavaThread::stack_guard_yellow_disabled);
    __ br(Assembler::NE, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ blr(rscratch2);
    __ popa(); // XXX only restore smashed registers
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(rmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // rbcp == code_base()
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));    // get ConstMethod*
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset())); // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tst(t, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::EQ, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // monitor expected in c_rarg1 for slow unlock path
      __ lea (c_rarg1, Address(rfp,   // address of first monitor
                               (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                          wordSize - sizeof(BasicObjectLock))));

      __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ cbnz(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in r0:d0, call result handler to
  // restore potential result and handle it
  __ pop(ltos);
  __ pop(dtos);

  __ blr(result_handler);

  // remove activation
  __ ldr(esp, Address(rfp,
                      frame::interpreter_frame_sender_sp_offset *
                      wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mov(sp, esp);

  __ ret(lr);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rscratch1: sender sp
  address entry_point = __ pc();

  const Address constMethod(rmethod, Method::const_offset());
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r3,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ldr(r3, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // r2: size of parameters

  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
  __ sub(r3, r3, r2); // r3 = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ sub(rlocals, rlocals, wordSize);

  // Make room for locals
  __ sub(rscratch1, esp, r3, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // r3 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ ands(zr, r3, r3);
    __ br(Assembler::LE, exit); // do nothing if r3 <= 0
    __ bind(loop);
    __ str(zr, Address(__ post(rscratch1, wordSize)));
    __ sub(r3, r3, 1); // until everything initialized
    __ cbnz(r3, loop);
    __ bind(exit);
  }
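  // Note: the locals are explicitly zeroed (rather than left as stack
  // garbage) presumably so that nothing stale sits in the frame before the
  // method writes to these slots; the GC walks interpreter frames, and a
  // leftover value in an oop-typed local slot could otherwise be
  // misinterpreted as a reference.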

  // And the base dispatch table
  __ get_dispatch();

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      // don't think we need this
      __ get_method(r1);
      __ b(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native
// call methods.  These both come in synchronized and non-synchronized
// versions but the frame layout they create is very similar. The
// other method entry types are really just special purpose entries
// that are really entry and interpretation all in one. These are for
// trivial methods like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
//      rmethod: Method*
//
// Stack layout immediately at entry
//
// [ return address     ] <--- sp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized entries
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see
// interpreter_aarch64.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry        ] <--- esp
//   ...
// [ monitor entry        ]
// [ expr. stack bottom   ]
// [ saved rbcp           ]
// [ current rlocals      ]
// [ Method*              ]
// [ saved rfp            ] <--- rfp
// [ return address       ]
// [ local variable m     ]
//   ...
// [ local variable 1     ]
// [ parameter n          ]
//   ...
// [ parameter 1          ] <--- rlocals

address AbstractInterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;

  switch (kind) {
  case Interpreter::zerolocals             :                                                                             break;
  case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
  case Interpreter::native                 : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true);  break;
  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();       break;
  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();    break;
  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();    break;

  case Interpreter::java_lang_math_sin     : // fall thru
  case Interpreter::java_lang_math_cos     : // fall thru
  case Interpreter::java_lang_math_tan     : // fall thru
  case Interpreter::java_lang_math_abs     : // fall thru
  case Interpreter::java_lang_math_log     : // fall thru
  case Interpreter::java_lang_math_log10   : // fall thru
  case Interpreter::java_lang_math_sqrt    : // fall thru
  case Interpreter::java_lang_math_pow     : // fall thru
  case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);    break;
  case Interpreter::java_lang_ref_reference_get
                                           : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
  case Interpreter::java_util_zip_CRC32_update
                                           : entry_point = ((InterpreterGenerator*)this)->generate_CRC32_update_entry(); break;
  case Interpreter::java_util_zip_CRC32_updateBytes
                                           : // fall thru
  case Interpreter::java_util_zip_CRC32_updateByteBuffer
                                           : entry_point = ((InterpreterGenerator*)this)->generate_CRC32_updateBytes_entry(kind); break;
  default                                  : ShouldNotReachHere();                                                       break;
  }

  if (entry_point) {
    return entry_point;
  }

  return ((InterpreterGenerator*) this)->
                                generate_normal_entry(synchronized);
}


// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
  case Interpreter::java_lang_math_sin     : // fall thru
  case Interpreter::java_lang_math_cos     : // fall thru
  case Interpreter::java_lang_math_tan     : // fall thru
  case Interpreter::java_lang_math_abs     : // fall thru
  case Interpreter::java_lang_math_log     : // fall thru
  case Interpreter::java_lang_math_log10   : // fall thru
  case Interpreter::java_lang_math_sqrt    : // fall thru
  case Interpreter::java_lang_math_pow     : // fall thru
  case Interpreter::java_lang_math_exp     :
    return false;
  default:
    return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rfp thru expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}

// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.

  // fixed size of an interpreter frame:
  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition).  Since the callee parameters already account
  // for the callee's params we only need to account for the extra
  // locals.
  int size = overhead +
             (callee_locals - callee_params) +
             monitors * frame::interpreter_frame_monitor_size() +
             // On the top frame, at all times SP <= ESP, and SP is
             // 16-aligned.  We ensure this by adjusting SP on method
             // entry and re-entry to allow room for the maximum size of
             // the expression stack.  When we call another method we bump
             // SP so that no stack space is wasted.  So, only on the top
             // frame do we need to allow max_stack words.
             (is_top_frame ? max_stack : temps + extra_args);

  // On AArch64 we always keep the stack pointer 16-aligned, so we
  // must round up here.
  size = round_to(size, 2);

  return size;
}
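// Illustrative use of the formula above (hypothetical numbers, for a
// deoptimized middle-of-stack frame): with 1 monitor, 3 expression-stack
// temps, no extra args, and callee_locals == callee_params,
//
//   size = overhead + 0 + 1 * interpreter_frame_monitor_size() + (3 + 0)
//
// then rounded up to an even word count so that the 16-byte SP alignment
// is preserved.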
void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {
  // The frame interpreter_frame is guaranteed to be the right size,
  // as determined by a previous call to the size_activation() method.
  // It is also guaranteed to be walkable even though it is in a
  // skeletal state.

  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
                     Interpreter::stackElementWords;

#ifdef ASSERT
  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
#endif

  interpreter_frame->interpreter_frame_set_method(method);
  // NOTE the difference between sender_sp and
  // interpreter_frame_sender_sp: interpreter_frame_sender_sp is
  // the original sp of the caller (the unextended_sp) and
  // sender_sp is fp+8/16 (32bit/64bit) XXX
  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

#ifdef ASSERT
  if (caller->is_interpreted_frame()) {
    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
  }
#endif

  interpreter_frame->interpreter_frame_set_locals(locals);
  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
  BasicObjectLock* monbot = montop - moncount;
  interpreter_frame->interpreter_frame_set_monitor_end(monbot);

  // Set last_sp
  intptr_t* esp = (intptr_t*) monbot -
                  tempcount*Interpreter::stackElementWords -
                  popframe_extra_args;
  interpreter_frame->interpreter_frame_set_last_sp(esp);

  // All frames but the initial (oldest) interpreter frame we fill in have
  // a value for sender_sp that allows walking the stack but isn't
  // truly correct.  Correct the value here.
  if (extra_locals != 0 &&
      interpreter_frame->sender_sp() ==
      interpreter_frame->interpreter_frame_sender_sp()) {
    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
                                                       extra_locals);
  }
  *interpreter_frame->interpreter_frame_cache_addr() =
    method->constants()->cache();
}
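// Illustrative numbers (an assumption for exposition, not from the
// source): for a method with max_locals == 5, locals is placed at
// sender_sp + 5 - 1, i.e. the first local slot sits four words above
// the caller's sp and the remaining slots are indexed downwards:
//
//   locals[0]  == *(sender_sp + 4)   // parameter 1 / local 0
//   locals[-4] == *(sender_sp + 0)   // local 4
//
// which matches the frame diagram above, where rlocals points at
// parameter 1, the highest-addressed slot.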
//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // r0: exception
  // r3: return address/pc that threw exception
  __ restore_bcp();    // rbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ reinit_heapbase();  // restore rheapbase as heapbase.
  __ get_dispatch();

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, rmethod may be corrupt.
  __ get_method(rmethod);
  // expression stack is undefined here
  // r0: exception
  // rbcp: exception bcp
  __ verify_oop(r0);
  __ mov(c_rarg1, r0);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(r3,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
         + (EnableInvokeDynamic ? 2 : 0) + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // r0: exception handler entry point
  // r3: preserved exception oop
  // rbcp: bcp for exception handler
  __ push_ptr(r3); // push exception which is now the only value on the stack
  __ br(r0); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci still refers to the instruction
  // which caused the exception and the expression stack is
  // empty.  Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
  __ orr(r3, r3, JavaThread::popframe_processing_bit);
  __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ cbnz(r0, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(r0);
    __ ldr(r0, Address(r0, Method::const_offset()));
    __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
                                                    size_of_parameters_offset())));
    __ lsl(r0, r0, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
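    // The two instructions below locate the start of the argument
    // area: rlocals points at the highest-addressed parameter slot,
    // so stepping down by the parameter size in bytes and back up one
    // word yields the lowest-addressed parameter word.  E.g. (an
    // illustrative assumption) with two one-word parameters, r0 == 16
    // and the result is rlocals - 16 + 8 == rlocals - 8.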
    __ sub(rlocals, rlocals, r0);
    __ add(rlocals, rlocals, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          rthread, r0, rlocals);

    __ remove_activation(vtos,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
    __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret(lr);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Restore the last_sp and null it out
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);
  __ get_dispatch();

  // The method data pointer was incremented already during
  // call profiling.  We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");

#if INCLUDE_JVMTI
  if (EnableInvokeDynamic) {
    Label L_done;

    __ ldrb(rscratch1, Address(rbcp, 0));
    __ cmpw(rscratch1, Bytecodes::_invokestatic);
    __ br(Assembler::NE, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ ldr(c_rarg0, Address(rlocals, 0));
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);

    __ cbz(r0, L_done);

    __ str(r0, Address(esp, 0));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
         + (EnableInvokeDynamic ? 2 : 0));
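  // rscratch1 now holds max_stack plus the monitor slack (plus two
  // more words when EnableInvokeDynamic is set); subtracting that
  // many words (shifted left by 3, i.e. scaled to bytes) from the
  // frame's initial sp and masking with -16 re-establishes a 16-byte
  // aligned machine SP below the deepest possible expression stack.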
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(r0);
  __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, false, true, false);
  // restore exception
  __ get_vm_result(r0, rthread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // r0: exception
  // lr: return address/pc that threw exception
  // esp: expression stack of caller
  // rfp: fp of caller
  __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        rthread, lr);
  __ mov(r1, r0);                                      // save exception handler
  __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
  // We might be returning to a deopt handler that expects r3 to
  // contain the exception pc
  __ mov(r3, lr);
  // Note that an "issuing PC" is actually the next PC after the call
  __ br(r1);                                           // jump to exception
                                                       // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
  __ str(zr, cond_addr);

  __ remove_activation(state,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ ret(lr);

  return entry;
} // end of ForceEarlyReturn support



//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}
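// Illustrative note (not in the original source): the entry points
// above funnel every tos state into a single vtos template.  For
// example, if the previous bytecode left an int cached in the tos
// register, dispatch arrives at iep, push_i() spills it onto the
// expression stack, and execution falls through to the shared vtos
// code at L; a template entered with an empty tos uses vep directly
// and pushes nothing.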
//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(lr);
  __ push(state);
  __ push(RegSet::range(r0, r15), sp);
  __ mov(c_rarg2, r0);  // Pass itos
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(RegSet::range(r0, r15), sp);
  __ pop(state);
  __ pop(lr);
  __ ret(lr);  // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  Register rscratch3 = r0;
  __ push(rscratch1);
  __ push(rscratch2);
  __ push(rscratch3);
  __ mov(rscratch3, (address) &BytecodeCounter::_counter_value);
  __ atomic_add(noreg, 1, rscratch3);
  __ pop(rscratch3);
  __ pop(rscratch2);
  __ pop(rscratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ bl(Interpreter::trace_code(t->tos_in()));
  __ reinit_heapbase();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ push(rscratch1);
  __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
  __ ldr(rscratch1, Address(rscratch1));
  __ mov(rscratch2, StopInterpreterAt);
  __ cmpw(rscratch1, rscratch2);
  __ br(Assembler::NE, L);
  __ brk(0);
  __ bind(L);
  __ pop(rscratch1);
}

#endif // !PRODUCT
#endif // ! CC_INTERP