1 /* 2 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "interp_masm_aarch64.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "interpreter/interpreterRuntime.hpp" 30 #include "logging/log.hpp" 31 #include "oops/arrayOop.hpp" 32 #include "oops/markOop.hpp" 33 #include "oops/methodData.hpp" 34 #include "oops/method.hpp" 35 #include "prims/jvmtiExport.hpp" 36 #include "prims/jvmtiThreadState.hpp" 37 #include "runtime/basicLock.hpp" 38 #include "runtime/biasedLocking.hpp" 39 #include "runtime/sharedRuntime.hpp" 40 #include "runtime/thread.inline.hpp" 41 42 43 void InterpreterMacroAssembler::narrow(Register result) { 44 45 // Get method->_constMethod->_result_type 46 ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize)); 47 ldr(rscratch1, Address(rscratch1, Method::const_offset())); 48 ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset())); 49 50 Label done, notBool, notByte, notChar; 51 52 // common case first 53 cmpw(rscratch1, T_INT); 54 br(Assembler::EQ, done); 55 56 // mask integer result to narrower return type. 57 cmpw(rscratch1, T_BOOLEAN); 58 br(Assembler::NE, notBool); 59 andw(result, result, 0x1); 60 b(done); 61 62 bind(notBool); 63 cmpw(rscratch1, T_BYTE); 64 br(Assembler::NE, notByte); 65 sbfx(result, result, 0, 8); 66 b(done); 67 68 bind(notByte); 69 cmpw(rscratch1, T_CHAR); 70 br(Assembler::NE, notChar); 71 ubfx(result, result, 0, 16); // truncate upper 16 bits 72 b(done); 73 74 bind(notChar); 75 sbfx(result, result, 0, 16); // sign-extend short 76 77 // Nothing to do for T_INT 78 bind(done); 79 } 80 81 void InterpreterMacroAssembler::jump_to_entry(address entry) { 82 assert(entry, "Entry must have been generated by now"); 83 b(entry); 84 } 85 86 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) { 87 if (JvmtiExport::can_pop_frame()) { 88 Label L; 89 // Initiate popframe handling only if it is not already being 90 // processed. If the flag has the popframe_processing bit set, it 91 // means that this code is called *during* popframe handling - we 92 // don't want to reenter. 93 // This method is only called just after the call into the vm in 94 // call_VM_base, so the arg registers are available. 
95 ldrw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset())); 96 tbz(rscratch1, exact_log2(JavaThread::popframe_pending_bit), L); 97 tbnz(rscratch1, exact_log2(JavaThread::popframe_processing_bit), L); 98 // Call Interpreter::remove_activation_preserving_args_entry() to get the 99 // address of the same-named entrypoint in the generated interpreter code. 100 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry)); 101 br(r0); 102 bind(L); 103 } 104 } 105 106 107 void InterpreterMacroAssembler::load_earlyret_value(TosState state) { 108 ldr(r2, Address(rthread, JavaThread::jvmti_thread_state_offset())); 109 const Address tos_addr(r2, JvmtiThreadState::earlyret_tos_offset()); 110 const Address oop_addr(r2, JvmtiThreadState::earlyret_oop_offset()); 111 const Address val_addr(r2, JvmtiThreadState::earlyret_value_offset()); 112 switch (state) { 113 case atos: ldr(r0, oop_addr); 114 str(zr, oop_addr); 115 verify_oop(r0, state); break; 116 case ltos: ldr(r0, val_addr); break; 117 case btos: // fall through 118 case ztos: // fall through 119 case ctos: // fall through 120 case stos: // fall through 121 case itos: ldrw(r0, val_addr); break; 122 case ftos: ldrs(v0, val_addr); break; 123 case dtos: ldrd(v0, val_addr); break; 124 case vtos: /* nothing to do */ break; 125 default : ShouldNotReachHere(); 126 } 127 // Clean up tos value in the thread object 128 movw(rscratch1, (int) ilgl); 129 strw(rscratch1, tos_addr); 130 strw(zr, val_addr); 131 } 132 133 134 void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) { 135 if (JvmtiExport::can_force_early_return()) { 136 Label L; 137 ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset())); 138 cbz(rscratch1, L); // if (thread->jvmti_thread_state() == NULL) exit; 139 140 // Initiate earlyret handling only if it is not already being processed. 141 // If the flag has the earlyret_processing bit set, it means that this code 142 // is called *during* earlyret handling - we don't want to reenter. 143 ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_state_offset())); 144 cmpw(rscratch1, JvmtiThreadState::earlyret_pending); 145 br(Assembler::NE, L); 146 147 // Call Interpreter::remove_activation_early_entry() to get the address of the 148 // same-named entrypoint in the generated interpreter code. 
149 ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset())); 150 ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_tos_offset())); 151 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), rscratch1); 152 br(r0); 153 bind(L); 154 } 155 } 156 157 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp( 158 Register reg, 159 int bcp_offset) { 160 assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode"); 161 ldrh(reg, Address(rbcp, bcp_offset)); 162 rev16(reg, reg); 163 } 164 165 void InterpreterMacroAssembler::get_dispatch() { 166 unsigned long offset; 167 adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset); 168 lea(rdispatch, Address(rdispatch, offset)); 169 } 170 171 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index, 172 int bcp_offset, 173 size_t index_size) { 174 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); 175 if (index_size == sizeof(u2)) { 176 load_unsigned_short(index, Address(rbcp, bcp_offset)); 177 } else if (index_size == sizeof(u4)) { 178 // assert(EnableInvokeDynamic, "giant index used only for JSR 292"); 179 ldrw(index, Address(rbcp, bcp_offset)); 180 // Check if the secondary index definition is still ~x, otherwise 181 // we have to change the following assembler code to calculate the 182 // plain index. 183 assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line"); 184 eonw(index, index, zr); // convert to plain index 185 } else if (index_size == sizeof(u1)) { 186 load_unsigned_byte(index, Address(rbcp, bcp_offset)); 187 } else { 188 ShouldNotReachHere(); 189 } 190 } 191 192 // Return 193 // Rindex: index into constant pool 194 // Rcache: address of cache entry - ConstantPoolCache::base_offset() 195 // 196 // A caller must add ConstantPoolCache::base_offset() to Rcache to get 197 // the true address of the cache entry. 198 // 199 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, 200 Register index, 201 int bcp_offset, 202 size_t index_size) { 203 assert_different_registers(cache, index); 204 assert_different_registers(cache, rcpool); 205 get_cache_index_at_bcp(index, bcp_offset, index_size); 206 assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); 207 // convert from field index to ConstantPoolCacheEntry 208 // aarch64 already has the cache in rcpool so there is no need to 209 // install it in cache. instead we pre-add the indexed offset to 210 // rcpool and return it in cache. All clients of this method need to 211 // be modified accordingly. 212 add(cache, rcpool, index, Assembler::LSL, 5); 213 } 214 215 216 void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache, 217 Register index, 218 Register bytecode, 219 int byte_no, 220 int bcp_offset, 221 size_t index_size) { 222 get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size); 223 // We use a 32-bit load here since the layout of 64-bit words on 224 // little-endian machines allow us that. 225 // n.b. 
unlike x86 cache already includes the index offset 226 lea(bytecode, Address(cache, 227 ConstantPoolCache::base_offset() 228 + ConstantPoolCacheEntry::indices_offset())); 229 ldarw(bytecode, bytecode); 230 const int shift_count = (1 + byte_no) * BitsPerByte; 231 ubfx(bytecode, bytecode, shift_count, BitsPerByte); 232 } 233 234 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, 235 Register tmp, 236 int bcp_offset, 237 size_t index_size) { 238 assert(cache != tmp, "must use different register"); 239 get_cache_index_at_bcp(tmp, bcp_offset, index_size); 240 assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); 241 // convert from field index to ConstantPoolCacheEntry index 242 // and from word offset to byte offset 243 assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line"); 244 ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize)); 245 // skip past the header 246 add(cache, cache, in_bytes(ConstantPoolCache::base_offset())); 247 add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord); // construct pointer to cache entry 248 } 249 250 void InterpreterMacroAssembler::get_method_counters(Register method, 251 Register mcs, Label& skip) { 252 Label has_counters; 253 ldr(mcs, Address(method, Method::method_counters_offset())); 254 cbnz(mcs, has_counters); 255 call_VM(noreg, CAST_FROM_FN_PTR(address, 256 InterpreterRuntime::build_method_counters), method); 257 ldr(mcs, Address(method, Method::method_counters_offset())); 258 cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory 259 bind(has_counters); 260 } 261 262 // Load object from cpool->resolved_references(index) 263 void InterpreterMacroAssembler::load_resolved_reference_at_index( 264 Register result, Register index) { 265 assert_different_registers(result, index); 266 // convert from field index to resolved_references() index and from 267 // word index to byte offset. Since this is a java object, it can be compressed 268 Register tmp = index; // reuse 269 lslw(tmp, tmp, LogBytesPerHeapOop); 270 271 get_constant_pool(result); 272 // load pointer for resolved_references[] objArray 273 ldr(result, Address(result, ConstantPool::cache_offset_in_bytes())); 274 ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes())); 275 resolve_oop_handle(result); 276 // Add in the index 277 add(result, result, tmp); 278 load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 279 } 280 281 void InterpreterMacroAssembler::load_resolved_klass_at_offset( 282 Register cpool, Register index, Register klass, Register temp) { 283 add(temp, cpool, index, LSL, LogBytesPerWord); 284 ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index 285 ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses 286 add(klass, klass, temp, LSL, LogBytesPerWord); 287 ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes())); 288 } 289 290 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a 291 // subtype of super_klass. 
292 // 293 // Args: 294 // r0: superklass 295 // Rsub_klass: subklass 296 // 297 // Kills: 298 // r2, r5 299 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, 300 Label& ok_is_subtype) { 301 assert(Rsub_klass != r0, "r0 holds superklass"); 302 assert(Rsub_klass != r2, "r2 holds 2ndary super array length"); 303 assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr"); 304 305 // Profile the not-null value's klass. 306 profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5 307 308 // Do the check. 309 check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2 310 311 // Profile the failure of the check. 312 profile_typecheck_failed(r2); // blows r2 313 } 314 315 // Java Expression Stack 316 317 void InterpreterMacroAssembler::pop_ptr(Register r) { 318 ldr(r, post(esp, wordSize)); 319 } 320 321 void InterpreterMacroAssembler::pop_i(Register r) { 322 ldrw(r, post(esp, wordSize)); 323 } 324 325 void InterpreterMacroAssembler::pop_l(Register r) { 326 ldr(r, post(esp, 2 * Interpreter::stackElementSize)); 327 } 328 329 void InterpreterMacroAssembler::push_ptr(Register r) { 330 str(r, pre(esp, -wordSize)); 331 } 332 333 void InterpreterMacroAssembler::push_i(Register r) { 334 str(r, pre(esp, -wordSize)); 335 } 336 337 void InterpreterMacroAssembler::push_l(Register r) { 338 str(zr, pre(esp, -wordSize)); 339 str(r, pre(esp, - wordSize)); 340 } 341 342 void InterpreterMacroAssembler::pop_f(FloatRegister r) { 343 ldrs(r, post(esp, wordSize)); 344 } 345 346 void InterpreterMacroAssembler::pop_d(FloatRegister r) { 347 ldrd(r, post(esp, 2 * Interpreter::stackElementSize)); 348 } 349 350 void InterpreterMacroAssembler::push_f(FloatRegister r) { 351 strs(r, pre(esp, -wordSize)); 352 } 353 354 void InterpreterMacroAssembler::push_d(FloatRegister r) { 355 strd(r, pre(esp, 2* -wordSize)); 356 } 357 358 void InterpreterMacroAssembler::pop(TosState state) { 359 switch (state) { 360 case atos: pop_ptr(); break; 361 case btos: 362 case ztos: 363 case ctos: 364 case stos: 365 case itos: pop_i(); break; 366 case ltos: pop_l(); break; 367 case ftos: pop_f(); break; 368 case dtos: pop_d(); break; 369 case vtos: /* nothing to do */ break; 370 default: ShouldNotReachHere(); 371 } 372 verify_oop(r0, state); 373 } 374 375 void InterpreterMacroAssembler::push(TosState state) { 376 verify_oop(r0, state); 377 switch (state) { 378 case atos: push_ptr(); break; 379 case btos: 380 case ztos: 381 case ctos: 382 case stos: 383 case itos: push_i(); break; 384 case ltos: push_l(); break; 385 case ftos: push_f(); break; 386 case dtos: push_d(); break; 387 case vtos: /* nothing to do */ break; 388 default : ShouldNotReachHere(); 389 } 390 } 391 392 // Helpers for swap and dup 393 void InterpreterMacroAssembler::load_ptr(int n, Register val) { 394 ldr(val, Address(esp, Interpreter::expr_offset_in_bytes(n))); 395 } 396 397 void InterpreterMacroAssembler::store_ptr(int n, Register val) { 398 str(val, Address(esp, Interpreter::expr_offset_in_bytes(n))); 399 } 400 401 402 void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() { 403 // set sender sp 404 mov(r13, sp); 405 // record last_sp 406 str(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 407 } 408 409 // Jump to from_interpreted entry of a call unless single stepping is possible 410 // in this thread in which case we must call the i2i entry 411 void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { 412 prepare_to_jump_from_interpreted(); 413 414 if 
(JvmtiExport::can_post_interpreter_events()) { 415 Label run_compiled_code; 416 // JVMTI events, such as single-stepping, are implemented partly by avoiding running 417 // compiled code in threads for which the event is enabled. Check here for 418 // interp_only_mode if these events CAN be enabled. 419 ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset())); 420 cbzw(rscratch1, run_compiled_code); 421 ldr(rscratch1, Address(method, Method::interpreter_entry_offset())); 422 br(rscratch1); 423 bind(run_compiled_code); 424 } 425 426 ldr(rscratch1, Address(method, Method::from_interpreted_offset())); 427 br(rscratch1); 428 } 429 430 // The following two routines provide a hook so that an implementation 431 // can schedule the dispatch in two parts. amd64 does not do this. 432 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) { 433 } 434 435 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) { 436 dispatch_next(state, step); 437 } 438 439 void InterpreterMacroAssembler::dispatch_base(TosState state, 440 address* table, 441 bool verifyoop) { 442 if (VerifyActivationFrameSize) { 443 Unimplemented(); 444 } 445 if (verifyoop) { 446 verify_oop(r0, state); 447 } 448 if (table == Interpreter::dispatch_table(state)) { 449 addw(rscratch2, rscratch1, Interpreter::distance_from_dispatch_table(state)); 450 ldr(rscratch2, Address(rdispatch, rscratch2, Address::uxtw(3))); 451 } else { 452 mov(rscratch2, (address)table); 453 ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3))); 454 } 455 br(rscratch2); 456 } 457 458 void InterpreterMacroAssembler::dispatch_only(TosState state) { 459 dispatch_base(state, Interpreter::dispatch_table(state)); 460 } 461 462 void InterpreterMacroAssembler::dispatch_only_normal(TosState state) { 463 dispatch_base(state, Interpreter::normal_table(state)); 464 } 465 466 void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) { 467 dispatch_base(state, Interpreter::normal_table(state), false); 468 } 469 470 471 void InterpreterMacroAssembler::dispatch_next(TosState state, int step) { 472 // load next bytecode 473 ldrb(rscratch1, Address(pre(rbcp, step))); 474 dispatch_base(state, Interpreter::dispatch_table(state)); 475 } 476 477 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) { 478 // load current bytecode 479 ldrb(rscratch1, Address(rbcp, 0)); 480 dispatch_base(state, table); 481 } 482 483 // remove activation 484 // 485 // Unlock the receiver if this is a synchronized method. 486 // Unlock any Java monitors from syncronized blocks. 487 // Remove the activation from the stack. 
488 // 489 // If there are locked Java monitors 490 // If throw_monitor_exception 491 // throws IllegalMonitorStateException 492 // Else if install_monitor_exception 493 // installs IllegalMonitorStateException 494 // Else 495 // no error processing 496 void InterpreterMacroAssembler::remove_activation( 497 TosState state, 498 bool throw_monitor_exception, 499 bool install_monitor_exception, 500 bool notify_jvmdi) { 501 // Note: Registers r3 xmm0 may be in use for the 502 // result check if synchronized method 503 Label unlocked, unlock, no_unlock; 504 505 // get the value of _do_not_unlock_if_synchronized into r3 506 const Address do_not_unlock_if_synchronized(rthread, 507 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 508 ldrb(r3, do_not_unlock_if_synchronized); 509 strb(zr, do_not_unlock_if_synchronized); // reset the flag 510 511 // get method access flags 512 ldr(r1, Address(rfp, frame::interpreter_frame_method_offset * wordSize)); 513 ldr(r2, Address(r1, Method::access_flags_offset())); 514 tbz(r2, exact_log2(JVM_ACC_SYNCHRONIZED), unlocked); 515 516 // Don't unlock anything if the _do_not_unlock_if_synchronized flag 517 // is set. 518 cbnz(r3, no_unlock); 519 520 // unlock monitor 521 push(state); // save result 522 523 // BasicObjectLock will be first in list, since this is a 524 // synchronized method. However, need to check that the object has 525 // not been unlocked by an explicit monitorexit bytecode. 526 const Address monitor(rfp, frame::interpreter_frame_initial_sp_offset * 527 wordSize - (int) sizeof(BasicObjectLock)); 528 // We use c_rarg1 so that if we go slow path it will be the correct 529 // register for unlock_object to pass to VM directly 530 lea(c_rarg1, monitor); // address of first monitor 531 532 ldr(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); 533 cbnz(r0, unlock); 534 535 pop(state); 536 if (throw_monitor_exception) { 537 // Entry already unlocked, need to throw exception 538 call_VM(noreg, CAST_FROM_FN_PTR(address, 539 InterpreterRuntime::throw_illegal_monitor_state_exception)); 540 should_not_reach_here(); 541 } else { 542 // Monitor already unlocked during a stack unroll. If requested, 543 // install an illegal_monitor_state_exception. Continue with 544 // stack unrolling. 
545 if (install_monitor_exception) { 546 call_VM(noreg, CAST_FROM_FN_PTR(address, 547 InterpreterRuntime::new_illegal_monitor_state_exception)); 548 } 549 b(unlocked); 550 } 551 552 bind(unlock); 553 unlock_object(c_rarg1); 554 pop(state); 555 556 // Check that for block-structured locking (i.e., that all locked 557 // objects has been unlocked) 558 bind(unlocked); 559 560 // r0: Might contain return value 561 562 // Check that all monitors are unlocked 563 { 564 Label loop, exception, entry, restart; 565 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 566 const Address monitor_block_top( 567 rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize); 568 const Address monitor_block_bot( 569 rfp, frame::interpreter_frame_initial_sp_offset * wordSize); 570 571 bind(restart); 572 // We use c_rarg1 so that if we go slow path it will be the correct 573 // register for unlock_object to pass to VM directly 574 ldr(c_rarg1, monitor_block_top); // points to current entry, starting 575 // with top-most entry 576 lea(r19, monitor_block_bot); // points to word before bottom of 577 // monitor block 578 b(entry); 579 580 // Entry already locked, need to throw exception 581 bind(exception); 582 583 if (throw_monitor_exception) { 584 // Throw exception 585 MacroAssembler::call_VM(noreg, 586 CAST_FROM_FN_PTR(address, InterpreterRuntime:: 587 throw_illegal_monitor_state_exception)); 588 should_not_reach_here(); 589 } else { 590 // Stack unrolling. Unlock object and install illegal_monitor_exception. 591 // Unlock does not block, so don't have to worry about the frame. 592 // We don't have to preserve c_rarg1 since we are going to throw an exception. 593 594 push(state); 595 unlock_object(c_rarg1); 596 pop(state); 597 598 if (install_monitor_exception) { 599 call_VM(noreg, CAST_FROM_FN_PTR(address, 600 InterpreterRuntime:: 601 new_illegal_monitor_state_exception)); 602 } 603 604 b(restart); 605 } 606 607 bind(loop); 608 // check if current entry is used 609 ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); 610 cbnz(rscratch1, exception); 611 612 add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry 613 bind(entry); 614 cmp(c_rarg1, r19); // check if bottom reached 615 br(Assembler::NE, loop); // if not at bottom then check this entry 616 } 617 618 bind(no_unlock); 619 620 // jvmti support 621 if (notify_jvmdi) { 622 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA 623 } else { 624 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA 625 } 626 627 // remove activation 628 // get sender esp 629 ldr(esp, 630 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); 631 if (StackReservedPages > 0) { 632 // testing if reserved zone needs to be re-enabled 633 Label no_reserved_zone_enabling; 634 635 ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset())); 636 cmp(esp, rscratch1); 637 br(Assembler::LS, no_reserved_zone_enabling); 638 639 call_VM_leaf( 640 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread); 641 call_VM(noreg, CAST_FROM_FN_PTR(address, 642 InterpreterRuntime::throw_delayed_StackOverflowError)); 643 should_not_reach_here(); 644 645 bind(no_reserved_zone_enabling); 646 } 647 // remove frame anchor 648 leave(); 649 // If we're returning to interpreted code we will shortly be 650 // adjusting SP to allow some space for ESP. If we're returning to 651 // compiled code the saved sender SP was saved in sender_sp, so this 652 // restores it. 
653 andr(sp, esp, -16); 654 } 655 656 // Lock object 657 // 658 // Args: 659 // c_rarg1: BasicObjectLock to be used for locking 660 // 661 // Kills: 662 // r0 663 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs) 664 // rscratch1, rscratch2 (scratch regs) 665 void InterpreterMacroAssembler::lock_object(Register lock_reg) 666 { 667 assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1"); 668 if (UseHeavyMonitors) { 669 call_VM(noreg, 670 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), 671 lock_reg); 672 } else { 673 Label done; 674 675 const Register swap_reg = r0; 676 const Register tmp = c_rarg2; 677 const Register obj_reg = c_rarg3; // Will contain the oop 678 679 const int obj_offset = BasicObjectLock::obj_offset_in_bytes(); 680 const int lock_offset = BasicObjectLock::lock_offset_in_bytes (); 681 const int mark_offset = lock_offset + 682 BasicLock::displaced_header_offset_in_bytes(); 683 684 Label slow_case; 685 686 // Load object pointer into obj_reg %c_rarg3 687 ldr(obj_reg, Address(lock_reg, obj_offset)); 688 689 if (UseBiasedLocking) { 690 biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, done, &slow_case); 691 } 692 693 // Load (object->mark() | 1) into swap_reg 694 ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes())); 695 orr(swap_reg, rscratch1, 1); 696 697 // Save (object->mark() | 1) into BasicLock's displaced header 698 str(swap_reg, Address(lock_reg, mark_offset)); 699 700 assert(lock_offset == 0, 701 "displached header must be first word in BasicObjectLock"); 702 703 Label fail; 704 if (PrintBiasedLockingStatistics) { 705 Label fast; 706 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail); 707 bind(fast); 708 atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()), 709 rscratch2, rscratch1, tmp); 710 b(done); 711 bind(fail); 712 } else { 713 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL); 714 } 715 716 // Test if the oopMark is an obvious stack pointer, i.e., 717 // 1) (mark & 7) == 0, and 718 // 2) rsp <= mark < mark + os::pagesize() 719 // 720 // These 3 tests can be done by evaluating the following 721 // expression: ((mark - rsp) & (7 - os::vm_page_size())), 722 // assuming both stack pointer and pagesize have their 723 // least significant 3 bits clear. 724 // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg 725 // NOTE2: aarch64 does not like to subtract sp from rn so take a 726 // copy 727 mov(rscratch1, sp); 728 sub(swap_reg, swap_reg, rscratch1); 729 ands(swap_reg, swap_reg, (unsigned long)(7 - os::vm_page_size())); 730 731 // Save the test result, for recursive case, the result is zero 732 str(swap_reg, Address(lock_reg, mark_offset)); 733 734 if (PrintBiasedLockingStatistics) { 735 br(Assembler::NE, slow_case); 736 atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()), 737 rscratch2, rscratch1, tmp); 738 } 739 br(Assembler::EQ, done); 740 741 bind(slow_case); 742 743 // Call the runtime routine for slow case 744 call_VM(noreg, 745 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), 746 lock_reg); 747 748 bind(done); 749 } 750 } 751 752 753 // Unlocks an object. Used in monitorexit bytecode and 754 // remove_activation. Throws an IllegalMonitorException if object is 755 // not locked by current thread. 756 // 757 // Args: 758 // c_rarg1: BasicObjectLock for lock 759 // 760 // Kills: 761 // r0 762 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... 
(param regs) 763 // rscratch1, rscratch2 (scratch regs) 764 void InterpreterMacroAssembler::unlock_object(Register lock_reg) 765 { 766 assert(lock_reg == c_rarg1, "The argument is only for looks. It must be rarg1"); 767 768 if (UseHeavyMonitors) { 769 call_VM(noreg, 770 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), 771 lock_reg); 772 } else { 773 Label done; 774 775 const Register swap_reg = r0; 776 const Register header_reg = c_rarg2; // Will contain the old oopMark 777 const Register obj_reg = c_rarg3; // Will contain the oop 778 779 save_bcp(); // Save in case of exception 780 781 // Convert from BasicObjectLock structure to object and BasicLock 782 // structure Store the BasicLock address into %r0 783 lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); 784 785 // Load oop into obj_reg(%c_rarg3) 786 ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); 787 788 // Free entry 789 str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); 790 791 if (UseBiasedLocking) { 792 biased_locking_exit(obj_reg, header_reg, done); 793 } 794 795 // Load the old header from BasicLock structure 796 ldr(header_reg, Address(swap_reg, 797 BasicLock::displaced_header_offset_in_bytes())); 798 799 // Test for recursion 800 cbz(header_reg, done); 801 802 // Atomic swap back the old header 803 cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL); 804 805 // Call the runtime routine for slow case. 806 str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj 807 call_VM(noreg, 808 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), 809 lock_reg); 810 811 bind(done); 812 813 restore_bcp(); 814 } 815 } 816 817 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, 818 Label& zero_continue) { 819 assert(ProfileInterpreter, "must be profiling interpreter"); 820 ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize)); 821 cbz(mdp, zero_continue); 822 } 823 824 // Set the method data pointer for the current bcp. 825 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { 826 assert(ProfileInterpreter, "must be profiling interpreter"); 827 Label set_mdp; 828 stp(r0, r1, Address(pre(sp, -2 * wordSize))); 829 830 // Test MDO to avoid the call if it is NULL. 831 ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset()))); 832 cbz(r0, set_mdp); 833 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rmethod, rbcp); 834 // r0: mdi 835 // mdo is guaranteed to be non-zero here, we checked for it before the call. 836 ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset()))); 837 lea(r1, Address(r1, in_bytes(MethodData::data_offset()))); 838 add(r0, r1, r0); 839 str(r0, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize)); 840 bind(set_mdp); 841 ldp(r0, r1, Address(post(sp, 2 * wordSize))); 842 } 843 844 void InterpreterMacroAssembler::verify_method_data_pointer() { 845 assert(ProfileInterpreter, "must be profiling interpreter"); 846 #ifdef ASSERT 847 Label verify_continue; 848 stp(r0, r1, Address(pre(sp, -2 * wordSize))); 849 stp(r2, r3, Address(pre(sp, -2 * wordSize))); 850 test_method_data_pointer(r3, verify_continue); // If mdp is zero, continue 851 get_method(r1); 852 853 // If the mdp is valid, it will point to a DataLayout header which is 854 // consistent with the bcp. The converse is highly probable also. 
855 ldrsh(r2, Address(r3, in_bytes(DataLayout::bci_offset()))); 856 ldr(rscratch1, Address(r1, Method::const_offset())); 857 add(r2, r2, rscratch1, Assembler::LSL); 858 lea(r2, Address(r2, ConstMethod::codes_offset())); 859 cmp(r2, rbcp); 860 br(Assembler::EQ, verify_continue); 861 // r1: method 862 // rbcp: bcp // rbcp == 22 863 // r3: mdp 864 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), 865 r1, rbcp, r3); 866 bind(verify_continue); 867 ldp(r2, r3, Address(post(sp, 2 * wordSize))); 868 ldp(r0, r1, Address(post(sp, 2 * wordSize))); 869 #endif // ASSERT 870 } 871 872 873 void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, 874 int constant, 875 Register value) { 876 assert(ProfileInterpreter, "must be profiling interpreter"); 877 Address data(mdp_in, constant); 878 str(value, data); 879 } 880 881 882 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in, 883 int constant, 884 bool decrement) { 885 increment_mdp_data_at(mdp_in, noreg, constant, decrement); 886 } 887 888 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in, 889 Register reg, 890 int constant, 891 bool decrement) { 892 assert(ProfileInterpreter, "must be profiling interpreter"); 893 // %%% this does 64bit counters at best it is wasting space 894 // at worst it is a rare bug when counters overflow 895 896 assert_different_registers(rscratch2, rscratch1, mdp_in, reg); 897 898 Address addr1(mdp_in, constant); 899 Address addr2(rscratch2, reg, Address::lsl(0)); 900 Address &addr = addr1; 901 if (reg != noreg) { 902 lea(rscratch2, addr1); 903 addr = addr2; 904 } 905 906 if (decrement) { 907 // Decrement the register. Set condition codes. 908 // Intel does this 909 // addptr(data, (int32_t) -DataLayout::counter_increment); 910 // If the decrement causes the counter to overflow, stay negative 911 // Label L; 912 // jcc(Assembler::negative, L); 913 // addptr(data, (int32_t) DataLayout::counter_increment); 914 // so we do this 915 ldr(rscratch1, addr); 916 subs(rscratch1, rscratch1, (unsigned)DataLayout::counter_increment); 917 Label L; 918 br(Assembler::LO, L); // skip store if counter underflow 919 str(rscratch1, addr); 920 bind(L); 921 } else { 922 assert(DataLayout::counter_increment == 1, 923 "flow-free idiom only works with 1"); 924 // Intel does this 925 // Increment the register. Set carry flag. 926 // addptr(data, DataLayout::counter_increment); 927 // If the increment causes the counter to overflow, pull back by 1. 
928 // sbbptr(data, (int32_t)0); 929 // so we do this 930 ldr(rscratch1, addr); 931 adds(rscratch1, rscratch1, DataLayout::counter_increment); 932 Label L; 933 br(Assembler::CS, L); // skip store if counter overflow 934 str(rscratch1, addr); 935 bind(L); 936 } 937 } 938 939 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, 940 int flag_byte_constant) { 941 assert(ProfileInterpreter, "must be profiling interpreter"); 942 int header_offset = in_bytes(DataLayout::header_offset()); 943 int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant); 944 // Set the flag 945 ldr(rscratch1, Address(mdp_in, header_offset)); 946 orr(rscratch1, rscratch1, header_bits); 947 str(rscratch1, Address(mdp_in, header_offset)); 948 } 949 950 951 void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in, 952 int offset, 953 Register value, 954 Register test_value_out, 955 Label& not_equal_continue) { 956 assert(ProfileInterpreter, "must be profiling interpreter"); 957 if (test_value_out == noreg) { 958 ldr(rscratch1, Address(mdp_in, offset)); 959 cmp(value, rscratch1); 960 } else { 961 // Put the test value into a register, so caller can use it: 962 ldr(test_value_out, Address(mdp_in, offset)); 963 cmp(value, test_value_out); 964 } 965 br(Assembler::NE, not_equal_continue); 966 } 967 968 969 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, 970 int offset_of_disp) { 971 assert(ProfileInterpreter, "must be profiling interpreter"); 972 ldr(rscratch1, Address(mdp_in, offset_of_disp)); 973 add(mdp_in, mdp_in, rscratch1, LSL); 974 str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize)); 975 } 976 977 978 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, 979 Register reg, 980 int offset_of_disp) { 981 assert(ProfileInterpreter, "must be profiling interpreter"); 982 lea(rscratch1, Address(mdp_in, offset_of_disp)); 983 ldr(rscratch1, Address(rscratch1, reg, Address::lsl(0))); 984 add(mdp_in, mdp_in, rscratch1, LSL); 985 str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize)); 986 } 987 988 989 void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, 990 int constant) { 991 assert(ProfileInterpreter, "must be profiling interpreter"); 992 add(mdp_in, mdp_in, (unsigned)constant); 993 str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize)); 994 } 995 996 997 void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) { 998 assert(ProfileInterpreter, "must be profiling interpreter"); 999 // save/restore across call_VM 1000 stp(zr, return_bci, Address(pre(sp, -2 * wordSize))); 1001 call_VM(noreg, 1002 CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), 1003 return_bci); 1004 ldp(zr, return_bci, Address(post(sp, 2 * wordSize))); 1005 } 1006 1007 1008 void InterpreterMacroAssembler::profile_taken_branch(Register mdp, 1009 Register bumped_count) { 1010 if (ProfileInterpreter) { 1011 Label profile_continue; 1012 1013 // If no method data exists, go to profile_continue. 1014 // Otherwise, assign to mdp 1015 test_method_data_pointer(mdp, profile_continue); 1016 1017 // We are taking a branch. Increment the taken count. 
1018 // We inline increment_mdp_data_at to return bumped_count in a register 1019 //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset())); 1020 Address data(mdp, in_bytes(JumpData::taken_offset())); 1021 ldr(bumped_count, data); 1022 assert(DataLayout::counter_increment == 1, 1023 "flow-free idiom only works with 1"); 1024 // Intel does this to catch overflow 1025 // addptr(bumped_count, DataLayout::counter_increment); 1026 // sbbptr(bumped_count, 0); 1027 // so we do this 1028 adds(bumped_count, bumped_count, DataLayout::counter_increment); 1029 Label L; 1030 br(Assembler::CS, L); // skip store if counter overflow 1031 str(bumped_count, data); 1032 bind(L); 1033 // The method data pointer needs to be updated to reflect the new target. 1034 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset())); 1035 bind(profile_continue); 1036 } 1037 } 1038 1039 1040 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) { 1041 if (ProfileInterpreter) { 1042 Label profile_continue; 1043 1044 // If no method data exists, go to profile_continue. 1045 test_method_data_pointer(mdp, profile_continue); 1046 1047 // We are taking a branch. Increment the not taken count. 1048 increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset())); 1049 1050 // The method data pointer needs to be updated to correspond to 1051 // the next bytecode 1052 update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size())); 1053 bind(profile_continue); 1054 } 1055 } 1056 1057 1058 void InterpreterMacroAssembler::profile_call(Register mdp) { 1059 if (ProfileInterpreter) { 1060 Label profile_continue; 1061 1062 // If no method data exists, go to profile_continue. 1063 test_method_data_pointer(mdp, profile_continue); 1064 1065 // We are making a call. Increment the count. 1066 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); 1067 1068 // The method data pointer needs to be updated to reflect the new target. 1069 update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size())); 1070 bind(profile_continue); 1071 } 1072 } 1073 1074 void InterpreterMacroAssembler::profile_final_call(Register mdp) { 1075 if (ProfileInterpreter) { 1076 Label profile_continue; 1077 1078 // If no method data exists, go to profile_continue. 1079 test_method_data_pointer(mdp, profile_continue); 1080 1081 // We are making a call. Increment the count. 1082 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); 1083 1084 // The method data pointer needs to be updated to reflect the new target. 1085 update_mdp_by_constant(mdp, 1086 in_bytes(VirtualCallData:: 1087 virtual_call_data_size())); 1088 bind(profile_continue); 1089 } 1090 } 1091 1092 1093 void InterpreterMacroAssembler::profile_virtual_call(Register receiver, 1094 Register mdp, 1095 Register reg2, 1096 bool receiver_can_be_null) { 1097 if (ProfileInterpreter) { 1098 Label profile_continue; 1099 1100 // If no method data exists, go to profile_continue. 1101 test_method_data_pointer(mdp, profile_continue); 1102 1103 Label skip_receiver_profile; 1104 if (receiver_can_be_null) { 1105 Label not_null; 1106 // We are making a call. Increment the count for null receiver. 1107 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); 1108 b(skip_receiver_profile); 1109 bind(not_null); 1110 } 1111 1112 // Record the receiver type. 1113 record_klass_in_profile(receiver, mdp, reg2, true); 1114 bind(skip_receiver_profile); 1115 1116 // The method data pointer needs to be updated to reflect the new target. 
1117 #if INCLUDE_JVMCI 1118 if (MethodProfileWidth == 0) { 1119 update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size())); 1120 } 1121 #else // INCLUDE_JVMCI 1122 update_mdp_by_constant(mdp, 1123 in_bytes(VirtualCallData:: 1124 virtual_call_data_size())); 1125 #endif // INCLUDE_JVMCI 1126 bind(profile_continue); 1127 } 1128 } 1129 1130 #if INCLUDE_JVMCI 1131 void InterpreterMacroAssembler::profile_called_method(Register method, Register mdp, Register reg2) { 1132 assert_different_registers(method, mdp, reg2); 1133 if (ProfileInterpreter && MethodProfileWidth > 0) { 1134 Label profile_continue; 1135 1136 // If no method data exists, go to profile_continue. 1137 test_method_data_pointer(mdp, profile_continue); 1138 1139 Label done; 1140 record_item_in_profile_helper(method, mdp, reg2, 0, done, MethodProfileWidth, 1141 &VirtualCallData::method_offset, &VirtualCallData::method_count_offset, in_bytes(VirtualCallData::nonprofiled_receiver_count_offset())); 1142 bind(done); 1143 1144 update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size())); 1145 bind(profile_continue); 1146 } 1147 } 1148 #endif // INCLUDE_JVMCI 1149 1150 // This routine creates a state machine for updating the multi-row 1151 // type profile at a virtual call site (or other type-sensitive bytecode). 1152 // The machine visits each row (of receiver/count) until the receiver type 1153 // is found, or until it runs out of rows. At the same time, it remembers 1154 // the location of the first empty row. (An empty row records null for its 1155 // receiver, and can be allocated for a newly-observed receiver type.) 1156 // Because there are two degrees of freedom in the state, a simple linear 1157 // search will not work; it must be a decision tree. Hence this helper 1158 // function is recursive, to generate the required tree structured code. 1159 // It's the interpreter, so we are trading off code space for speed. 1160 // See below for example code. 1161 void InterpreterMacroAssembler::record_klass_in_profile_helper( 1162 Register receiver, Register mdp, 1163 Register reg2, int start_row, 1164 Label& done, bool is_virtual_call) { 1165 if (TypeProfileWidth == 0) { 1166 if (is_virtual_call) { 1167 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); 1168 } 1169 #if INCLUDE_JVMCI 1170 else if (EnableJVMCI) { 1171 increment_mdp_data_at(mdp, in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset())); 1172 } 1173 #endif // INCLUDE_JVMCI 1174 } else { 1175 int non_profiled_offset = -1; 1176 if (is_virtual_call) { 1177 non_profiled_offset = in_bytes(CounterData::count_offset()); 1178 } 1179 #if INCLUDE_JVMCI 1180 else if (EnableJVMCI) { 1181 non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()); 1182 } 1183 #endif // INCLUDE_JVMCI 1184 1185 record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth, 1186 &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset); 1187 } 1188 } 1189 1190 void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp, 1191 Register reg2, int start_row, Label& done, int total_rows, 1192 OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn, 1193 int non_profiled_offset) { 1194 int last_row = total_rows - 1; 1195 assert(start_row <= last_row, "must be work left to do"); 1196 // Test this row for both the item and for null. 1197 // Take any of three different outcomes: 1198 // 1. 
found item => increment count and goto done 1199 // 2. found null => keep looking for case 1, maybe allocate this cell 1200 // 3. found something else => keep looking for cases 1 and 2 1201 // Case 3 is handled by a recursive call. 1202 for (int row = start_row; row <= last_row; row++) { 1203 Label next_test; 1204 bool test_for_null_also = (row == start_row); 1205 1206 // See if the item is item[n]. 1207 int item_offset = in_bytes(item_offset_fn(row)); 1208 test_mdp_data_at(mdp, item_offset, item, 1209 (test_for_null_also ? reg2 : noreg), 1210 next_test); 1211 // (Reg2 now contains the item from the CallData.) 1212 1213 // The item is item[n]. Increment count[n]. 1214 int count_offset = in_bytes(item_count_offset_fn(row)); 1215 increment_mdp_data_at(mdp, count_offset); 1216 b(done); 1217 bind(next_test); 1218 1219 if (test_for_null_also) { 1220 Label found_null; 1221 // Failed the equality check on item[n]... Test for null. 1222 if (start_row == last_row) { 1223 // The only thing left to do is handle the null case. 1224 if (non_profiled_offset >= 0) { 1225 cbz(reg2, found_null); 1226 // Item did not match any saved item and there is no empty row for it. 1227 // Increment total counter to indicate polymorphic case. 1228 increment_mdp_data_at(mdp, non_profiled_offset); 1229 b(done); 1230 bind(found_null); 1231 } else { 1232 cbnz(reg2, done); 1233 } 1234 break; 1235 } 1236 // Since null is rare, make it be the branch-taken case. 1237 cbz(reg2, found_null); 1238 1239 // Put all the "Case 3" tests here. 1240 record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows, 1241 item_offset_fn, item_count_offset_fn, non_profiled_offset); 1242 1243 // Found a null. Keep searching for a matching item, 1244 // but remember that this is an empty (unused) slot. 1245 bind(found_null); 1246 } 1247 } 1248 1249 // In the fall-through case, we found no matching item, but we 1250 // observed the item[start_row] is NULL. 1251 1252 // Fill in the item field and increment the count. 
1253 int item_offset = in_bytes(item_offset_fn(start_row)); 1254 set_mdp_data_at(mdp, item_offset, item); 1255 int count_offset = in_bytes(item_count_offset_fn(start_row)); 1256 mov(reg2, DataLayout::counter_increment); 1257 set_mdp_data_at(mdp, count_offset, reg2); 1258 if (start_row > 0) { 1259 b(done); 1260 } 1261 } 1262 1263 // Example state machine code for three profile rows: 1264 // // main copy of decision tree, rooted at row[1] 1265 // if (row[0].rec == rec) { row[0].incr(); goto done; } 1266 // if (row[0].rec != NULL) { 1267 // // inner copy of decision tree, rooted at row[1] 1268 // if (row[1].rec == rec) { row[1].incr(); goto done; } 1269 // if (row[1].rec != NULL) { 1270 // // degenerate decision tree, rooted at row[2] 1271 // if (row[2].rec == rec) { row[2].incr(); goto done; } 1272 // if (row[2].rec != NULL) { count.incr(); goto done; } // overflow 1273 // row[2].init(rec); goto done; 1274 // } else { 1275 // // remember row[1] is empty 1276 // if (row[2].rec == rec) { row[2].incr(); goto done; } 1277 // row[1].init(rec); goto done; 1278 // } 1279 // } else { 1280 // // remember row[0] is empty 1281 // if (row[1].rec == rec) { row[1].incr(); goto done; } 1282 // if (row[2].rec == rec) { row[2].incr(); goto done; } 1283 // row[0].init(rec); goto done; 1284 // } 1285 // done: 1286 1287 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver, 1288 Register mdp, Register reg2, 1289 bool is_virtual_call) { 1290 assert(ProfileInterpreter, "must be profiling"); 1291 Label done; 1292 1293 record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call); 1294 1295 bind (done); 1296 } 1297 1298 void InterpreterMacroAssembler::profile_ret(Register return_bci, 1299 Register mdp) { 1300 if (ProfileInterpreter) { 1301 Label profile_continue; 1302 uint row; 1303 1304 // If no method data exists, go to profile_continue. 1305 test_method_data_pointer(mdp, profile_continue); 1306 1307 // Update the total ret count. 1308 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); 1309 1310 for (row = 0; row < RetData::row_limit(); row++) { 1311 Label next_test; 1312 1313 // See if return_bci is equal to bci[n]: 1314 test_mdp_data_at(mdp, 1315 in_bytes(RetData::bci_offset(row)), 1316 return_bci, noreg, 1317 next_test); 1318 1319 // return_bci is equal to bci[n]. Increment the count. 1320 increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row))); 1321 1322 // The method data pointer needs to be updated to reflect the new target. 1323 update_mdp_by_offset(mdp, 1324 in_bytes(RetData::bci_displacement_offset(row))); 1325 b(profile_continue); 1326 bind(next_test); 1327 } 1328 1329 update_mdp_for_ret(return_bci); 1330 1331 bind(profile_continue); 1332 } 1333 } 1334 1335 void InterpreterMacroAssembler::profile_null_seen(Register mdp) { 1336 if (ProfileInterpreter) { 1337 Label profile_continue; 1338 1339 // If no method data exists, go to profile_continue. 1340 test_method_data_pointer(mdp, profile_continue); 1341 1342 set_mdp_flag_at(mdp, BitData::null_seen_byte_constant()); 1343 1344 // The method data pointer needs to be updated. 
1345 int mdp_delta = in_bytes(BitData::bit_data_size()); 1346 if (TypeProfileCasts) { 1347 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); 1348 } 1349 update_mdp_by_constant(mdp, mdp_delta); 1350 1351 bind(profile_continue); 1352 } 1353 } 1354 1355 void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) { 1356 if (ProfileInterpreter && TypeProfileCasts) { 1357 Label profile_continue; 1358 1359 // If no method data exists, go to profile_continue. 1360 test_method_data_pointer(mdp, profile_continue); 1361 1362 int count_offset = in_bytes(CounterData::count_offset()); 1363 // Back up the address, since we have already bumped the mdp. 1364 count_offset -= in_bytes(VirtualCallData::virtual_call_data_size()); 1365 1366 // *Decrement* the counter. We expect to see zero or small negatives. 1367 increment_mdp_data_at(mdp, count_offset, true); 1368 1369 bind (profile_continue); 1370 } 1371 } 1372 1373 void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) { 1374 if (ProfileInterpreter) { 1375 Label profile_continue; 1376 1377 // If no method data exists, go to profile_continue. 1378 test_method_data_pointer(mdp, profile_continue); 1379 1380 // The method data pointer needs to be updated. 1381 int mdp_delta = in_bytes(BitData::bit_data_size()); 1382 if (TypeProfileCasts) { 1383 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); 1384 1385 // Record the object type. 1386 record_klass_in_profile(klass, mdp, reg2, false); 1387 } 1388 update_mdp_by_constant(mdp, mdp_delta); 1389 1390 bind(profile_continue); 1391 } 1392 } 1393 1394 void InterpreterMacroAssembler::profile_switch_default(Register mdp) { 1395 if (ProfileInterpreter) { 1396 Label profile_continue; 1397 1398 // If no method data exists, go to profile_continue. 1399 test_method_data_pointer(mdp, profile_continue); 1400 1401 // Update the default case count 1402 increment_mdp_data_at(mdp, 1403 in_bytes(MultiBranchData::default_count_offset())); 1404 1405 // The method data pointer needs to be updated. 1406 update_mdp_by_offset(mdp, 1407 in_bytes(MultiBranchData:: 1408 default_displacement_offset())); 1409 1410 bind(profile_continue); 1411 } 1412 } 1413 1414 void InterpreterMacroAssembler::profile_switch_case(Register index, 1415 Register mdp, 1416 Register reg2) { 1417 if (ProfileInterpreter) { 1418 Label profile_continue; 1419 1420 // If no method data exists, go to profile_continue. 1421 test_method_data_pointer(mdp, profile_continue); 1422 1423 // Build the base (index * per_case_size_in_bytes()) + 1424 // case_array_offset_in_bytes() 1425 movw(reg2, in_bytes(MultiBranchData::per_case_size())); 1426 movw(rscratch1, in_bytes(MultiBranchData::case_array_offset())); 1427 Assembler::maddw(index, index, reg2, rscratch1); 1428 1429 // Update the case count 1430 increment_mdp_data_at(mdp, 1431 index, 1432 in_bytes(MultiBranchData::relative_count_offset())); 1433 1434 // The method data pointer needs to be updated. 
1435 update_mdp_by_offset(mdp, 1436 index, 1437 in_bytes(MultiBranchData:: 1438 relative_displacement_offset())); 1439 1440 bind(profile_continue); 1441 } 1442 } 1443 1444 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) { 1445 if (state == atos) { 1446 MacroAssembler::verify_oop(reg); 1447 } 1448 } 1449 1450 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; } 1451 1452 1453 void InterpreterMacroAssembler::notify_method_entry() { 1454 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to 1455 // track stack depth. If it is possible to enter interp_only_mode we add 1456 // the code to check if the event should be sent. 1457 if (JvmtiExport::can_post_interpreter_events()) { 1458 Label L; 1459 ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset())); 1460 cbzw(r3, L); 1461 call_VM(noreg, CAST_FROM_FN_PTR(address, 1462 InterpreterRuntime::post_method_entry)); 1463 bind(L); 1464 } 1465 1466 { 1467 SkipIfEqual skip(this, &DTraceMethodProbes, false); 1468 get_method(c_rarg1); 1469 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 1470 rthread, c_rarg1); 1471 } 1472 1473 // RedefineClasses() tracing support for obsolete method entry 1474 if (log_is_enabled(Trace, redefine, class, obsolete)) { 1475 get_method(c_rarg1); 1476 call_VM_leaf( 1477 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), 1478 rthread, c_rarg1); 1479 } 1480 1481 } 1482 1483 1484 void InterpreterMacroAssembler::notify_method_exit( 1485 TosState state, NotifyMethodExitMode mode) { 1486 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to 1487 // track stack depth. If it is possible to enter interp_only_mode we add 1488 // the code to check if the event should be sent. 1489 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) { 1490 Label L; 1491 // Note: frame::interpreter_frame_result has a dependency on how the 1492 // method result is saved across the call to post_method_exit. If this 1493 // is changed then the interpreter_frame_result implementation will 1494 // need to be updated too. 1495 1496 // template interpreter will leave the result on the top of the stack. 1497 push(state); 1498 ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset())); 1499 cbz(r3, L); 1500 call_VM(noreg, 1501 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit)); 1502 bind(L); 1503 pop(state); 1504 } 1505 1506 { 1507 SkipIfEqual skip(this, &DTraceMethodProbes, false); 1508 push(state); 1509 get_method(c_rarg1); 1510 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), 1511 rthread, c_rarg1); 1512 pop(state); 1513 } 1514 } 1515 1516 1517 // Jump if ((*counter_addr += increment) & mask) satisfies the condition. 
1518 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, 1519 int increment, Address mask, 1520 Register scratch, Register scratch2, 1521 bool preloaded, Condition cond, 1522 Label* where) { 1523 if (!preloaded) { 1524 ldrw(scratch, counter_addr); 1525 } 1526 add(scratch, scratch, increment); 1527 strw(scratch, counter_addr); 1528 ldrw(scratch2, mask); 1529 ands(scratch, scratch, scratch2); 1530 br(cond, *where); 1531 } 1532 1533 void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point, 1534 int number_of_arguments) { 1535 // interpreter specific 1536 // 1537 // Note: No need to save/restore rbcp & rlocals pointer since these 1538 // are callee saved registers and no blocking/ GC can happen 1539 // in leaf calls. 1540 #ifdef ASSERT 1541 { 1542 Label L; 1543 ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 1544 cbz(rscratch1, L); 1545 stop("InterpreterMacroAssembler::call_VM_leaf_base:" 1546 " last_sp != NULL"); 1547 bind(L); 1548 } 1549 #endif /* ASSERT */ 1550 // super call 1551 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); 1552 } 1553 1554 void InterpreterMacroAssembler::call_VM_base(Register oop_result, 1555 Register java_thread, 1556 Register last_java_sp, 1557 address entry_point, 1558 int number_of_arguments, 1559 bool check_exceptions) { 1560 // interpreter specific 1561 // 1562 // Note: Could avoid restoring locals ptr (callee saved) - however doesn't 1563 // really make a difference for these runtime calls, since they are 1564 // slow anyway. Btw., bcp must be saved/restored since it may change 1565 // due to GC. 1566 // assert(java_thread == noreg , "not expecting a precomputed java thread"); 1567 save_bcp(); 1568 #ifdef ASSERT 1569 { 1570 Label L; 1571 ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 1572 cbz(rscratch1, L); 1573 stop("InterpreterMacroAssembler::call_VM_leaf_base:" 1574 " last_sp != NULL"); 1575 bind(L); 1576 } 1577 #endif /* ASSERT */ 1578 // super call 1579 MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp, 1580 entry_point, number_of_arguments, 1581 check_exceptions); 1582 // interpreter specific 1583 restore_bcp(); 1584 restore_locals(); 1585 } 1586 1587 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) { 1588 Label update, next, none; 1589 1590 verify_oop(obj); 1591 1592 cbnz(obj, update); 1593 orptr(mdo_addr, TypeEntries::null_seen); 1594 b(next); 1595 1596 bind(update); 1597 load_klass(obj, obj); 1598 1599 ldr(rscratch1, mdo_addr); 1600 eor(obj, obj, rscratch1); 1601 tst(obj, TypeEntries::type_klass_mask); 1602 br(Assembler::EQ, next); // klass seen before, nothing to 1603 // do. The unknown bit may have been 1604 // set already but no need to check. 1605 1606 tbnz(obj, exact_log2(TypeEntries::type_unknown), next); 1607 // already unknown. Nothing to do anymore. 1608 1609 ldr(rscratch1, mdo_addr); 1610 cbz(rscratch1, none); 1611 cmp(rscratch1, TypeEntries::null_seen); 1612 br(Assembler::EQ, none); 1613 // There is a chance that the checks above (re-reading profiling 1614 // data from memory) fail if another thread has just set the 1615 // profiling to this obj's klass 1616 ldr(rscratch1, mdo_addr); 1617 eor(obj, obj, rscratch1); 1618 tst(obj, TypeEntries::type_klass_mask); 1619 br(Assembler::EQ, next); 1620 1621 // different than before. Cannot keep accurate profile. 1622 orptr(mdo_addr, TypeEntries::type_unknown); 1623 b(next); 1624 1625 bind(none); 1626 // first time here. 
  str(obj, mdo_addr);

  bind(next);
}

void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    ldrb(rscratch1, Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start));
    cmp(rscratch1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    br(Assembler::NE, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
          sub(tmp, tmp, i*TypeStackSlotEntries::per_arg_count());
          cmp(tmp, TypeStackSlotEntries::per_arg_count());
          add(rscratch1, mdp, off_to_args);
          br(Assembler::LT, done);
        }
        ldr(tmp, Address(callee, Method::const_offset()));
        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list
        ldr(rscratch1, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))));
        sub(tmp, tmp, rscratch1);
        sub(tmp, tmp, 1);
        Address arg_addr = argument_address(tmp);
        ldr(tmp, arg_addr);

        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i)));
        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
        sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      add(rscratch1, mdp, off_to_args);
      bind(done);
      mov(mdp, rscratch1);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
        // if there's a return to profile.
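        // (Worked example, not from the original: on a 64-bit target
        // DataLayout::cell_size is 8 bytes, so exact_log2(cell_size) == 3
        // and the add below advances mdp by tmp * 8 bytes, i.e. it skips
        // the remaining tmp profiling cells in one shifted add.)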
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
      }
      str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, rbcp);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length
      Label do_profile;
      ldrb(rscratch1, Address(rbcp, 0));
      cmp(rscratch1, Bytecodes::_invokedynamic);
      br(Assembler::EQ, do_profile);
      cmp(rscratch1, Bytecodes::_invokehandle);
      br(Assembler::EQ, do_profile);
      get_method(tmp);
      ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
      cmp(rscratch1, vmIntrinsics::_compiledLambdaForm);
      br(Assembler::NE, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters
    ldr(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    tbnz(tmp1, 63, profile_continue);  // i.e. sign bit set

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameters. Collect profiling from last parameter down.
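    // (Added note, not from the original: after the add below, tmp1 is
    // reused as a cell index -- it is reloaded with the array length,
    // backed off by per_arg_count so it points at the last parameter's
    // entry, and decremented by per_arg_count each pass; the loop exits
    // once it goes negative, so parameters are profiled last to first.)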
    // mdo start + parameters offset + array length - 1
    add(mdp, mdp, tmp1);
    ldr(tmp1, Address(mdp, ArrayData::array_len_offset()));
    sub(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());

    Label loop;
    bind(loop);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    int per_arg_scale = exact_log2(DataLayout::cell_size);
    add(rscratch1, mdp, off_base);
    add(rscratch2, mdp, type_base);

    Address arg_off(rscratch1, tmp1, Address::lsl(per_arg_scale));
    Address arg_type(rscratch2, tmp1, Address::lsl(per_arg_scale));

    // load offset on the stack from the slot for this parameter
    ldr(tmp2, arg_off);
    neg(tmp2, tmp2);
    // read the parameter from the local area
    ldr(tmp2, Address(rlocals, tmp2, Address::lsl(Interpreter::logStackElementSize)));

    // profile the parameter
    profile_obj_type(tmp2, arg_type);

    // go to next parameter
    subs(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());
    br(Assembler::GE, loop);

    bind(profile_continue);
  }
}