/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/cpCache.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#define __ _masm->

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No arm specific initialization
}

//----------------------------------------------------------------------------------------------------
// Address computation

// local variables
static inline Address iaddress(int n) {
  return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) { return iaddress(n + 1); }
#ifndef AARCH64
static inline Address haddress(int n) { return iaddress(n + 0); }
#endif // !AARCH64

static inline Address faddress(int n) { return iaddress(n); }
static inline Address daddress(int n) { return laddress(n); }
static inline Address aaddress(int n) { return iaddress(n); }


void TemplateTable::get_local_base_addr(Register r, Register index) {
  __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
}

Address TemplateTable::load_iaddress(Register index, Register scratch) {
#ifdef AARCH64
  get_local_base_addr(scratch, index);
  return Address(scratch);
#else
  return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
#endif // AARCH64
}

Address TemplateTable::load_aaddress(Register index, Register scratch) {
  return load_iaddress(index, scratch);
}

Address TemplateTable::load_faddress(Register index, Register scratch) {
#ifdef __SOFTFP__
  return load_iaddress(index, scratch);
#else
  get_local_base_addr(scratch, index);
  return Address(scratch);
#endif // __SOFTFP__
}

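// Returns the address of the double local at the given index. Category-2
// locals occupy two interpreter stack slots; since locals are addressed
// downwards from Rlocals (note the sub in get_local_base_addr), the offset
// of slot (index + 1) is the lower address of the pair, so one 64-bit
// access covers both words.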
Address TemplateTable::load_daddress(Register index, Register scratch) {
  get_local_base_addr(scratch, index);
  return Address(scratch, Interpreter::local_offset_in_bytes(1));
}

// At top of Java expression stack which may be different from SP.
// It isn't for category 1 objects.
static inline Address at_tos() {
  return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
}


// 32-bit ARM:
//   Loads double/long local into R0_tos_lo/R1_tos_hi with two
//   separate ldr instructions (supports nonadjacent values).
//   Used for longs in all modes, and for doubles in SOFTFP mode.
//
// AArch64: loads long local into R0_tos.
//
void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
  const Register Rlocal_base = tmp;
  assert_different_registers(Rlocal_index, tmp);

  get_local_base_addr(Rlocal_base, Rlocal_index);
#ifdef AARCH64
  __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
#else
  __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
  __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
#endif // AARCH64
}


// 32-bit ARM:
//   Stores R0_tos_lo/R1_tos_hi to double/long local with two
//   separate str instructions (supports nonadjacent values).
//   Used for longs in all modes, and for doubles in SOFTFP mode.
//
// AArch64: stores R0_tos to long local.
//
void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
  const Register Rlocal_base = tmp;
  assert_different_registers(Rlocal_index, tmp);

  get_local_base_addr(Rlocal_base, Rlocal_index);
#ifdef AARCH64
  __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
#else
  __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
  __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
#endif // AARCH64
}

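// The two helpers below both compute the address of an array element. The
// first materializes an absolute base (array + scaled index) in 'temp'; the
// second keeps the array oop as the base register and builds the complete
// offset (header size + scaled index) in 'temp', which leaves the array
// register intact for callers (e.g. the access_load_at/access_store_at
// paths) that still need it.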
// Returns address of Java array element using temp register as address base.
Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
  int logElemSize = exact_log2(type2aelembytes(elemType));
  __ add_ptr_scaled_int32(temp, array, index, logElemSize);
  return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
}

// Returns address of Java array element using temp register as offset from array base
Address TemplateTable::get_array_elem_addr_same_base(BasicType elemType, Register array, Register index, Register temp) {
  int logElemSize = exact_log2(type2aelembytes(elemType));
  if (logElemSize == 0) {
    __ add(temp, index, arrayOopDesc::base_offset_in_bytes(elemType));
  } else {
    __ mov(temp, arrayOopDesc::base_offset_in_bytes(elemType));
    __ add_ptr_scaled_int32(temp, temp, index, logElemSize);
  }
  return Address(array, temp);
}

//----------------------------------------------------------------------------------------------------
// Condition conversion
AsmCondition convNegCond(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return ne;
    case TemplateTable::not_equal    : return eq;
    case TemplateTable::less         : return ge;
    case TemplateTable::less_equal   : return gt;
    case TemplateTable::greater      : return le;
    case TemplateTable::greater_equal: return lt;
  }
  ShouldNotReachHere();
  return nv;
}

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines

// Store an oop (or NULL) at the address described by obj.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
// Also destroys new_val and obj.base().
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register new_val,
                         Register tmp1,
                         Register tmp2,
                         Register tmp3,
                         bool is_null,
                         DecoratorSet decorators = 0) {

  assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
  if (is_null) {
    __ store_heap_oop_null(obj, new_val, tmp1, tmp2, tmp3, decorators);
  } else {
    __ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, decorators);
  }
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Register dst,
                        Address obj,
                        DecoratorSet decorators = 0) {
  __ load_heap_oop(dst, obj, noreg, noreg, noreg, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Rbcp, offset);
}


// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  assert_different_registers(bc_reg, temp_reg);
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
      __ mov(bc_reg, bc);
      __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ mov(bc_reg, bc);
    }
  }

  if (__ can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ ldrb(temp_reg, at_bcp(0));
    __ cmp(temp_reg, Bytecodes::_breakpoint);
    __ b(L_fast_patch, ne);
    if (bc_reg != R3) {
      __ mov(R3, bc_reg);
    }
    __ mov(R1, Rmethod);
    __ mov(R2, Rbcp);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ ldrb(temp_reg, at_bcp(0));
  __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
  __ b(L_okay, eq);
  __ cmp(temp_reg, bc_reg);
  __ b(L_okay, eq);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ strb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}



void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ mov(R0_tos, 0);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ mov_slow(R0_tos, value);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert((value == 0) || (value == 1), "unexpected long constant");
  __ mov(R0_tos, value);
#ifndef AARCH64
  __ mov(R1_tos_hi, 0);
#endif // !AARCH64
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
#ifdef AARCH64
  switch(value) {
  case 0:   __ fmov_sw(S0_tos, ZR);    break;
  case 1:   __ fmov_s (S0_tos, 0x70);  break;
  case 2:   __ fmov_s (S0_tos, 0x00);  break;
  default:  ShouldNotReachHere();      break;
  }
#else
  const int zero = 0;          // 0.0f
  const int one  = 0x3f800000; // 1.0f
  const int two  = 0x40000000; // 2.0f

  switch(value) {
  case 0:   __ mov(R0_tos, zero);   break;
  case 1:   __ mov(R0_tos, one);    break;
  case 2:   __ mov(R0_tos, two);    break;
  default:  ShouldNotReachHere();   break;
  }

#ifndef __SOFTFP__
  __ fmsr(S0_tos, R0_tos);
#endif // !__SOFTFP__
#endif // AARCH64
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
#ifdef AARCH64
  switch(value) {
  case 0:   __ fmov_dx(D0_tos, ZR);   break;
  case 1:   __ fmov_d (D0_tos, 0x70); break;
  default:  ShouldNotReachHere();     break;
  }
#else
  const int one_lo = 0;          // low part of 1.0
  const int one_hi = 0x3ff00000; // high part of 1.0

  if (value == 0) {
#ifdef __SOFTFP__
    __ mov(R0_tos_lo, 0);
    __ mov(R1_tos_hi, 0);
#else
    __ mov(R0_tmp, 0);
    __ fmdrr(D0_tos, R0_tmp, R0_tmp);
#endif // __SOFTFP__
  } else if (value == 1) {
    __ mov(R0_tos_lo, one_lo);
    __ mov_slow(R1_tos_hi, one_hi);
#ifndef __SOFTFP__
    __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
#endif // !__SOFTFP__
  } else {
    ShouldNotReachHere();
  }
#endif // AARCH64
}
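
// Note on the immediates used by fconst()/dconst() above: 1.0f is the
// IEEE-754 bit pattern 0x3f800000 (sign 0, biased exponent 127, zero
// mantissa) and 2.0f is 0x40000000; the high word of (double)1.0 is
// 0x3ff00000 for the same reason (exponent bias 1023). On AArch64, 0x70 and
// 0x00 are the corresponding 8-bit VFP immediate encodings of 1.0 and 2.0.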


void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldrsb(R0_tos, at_bcp(1));
}


void TemplateTable::sipush() {
  transition(vtos, itos);
  __ ldrsb(R0_tmp, at_bcp(1));
  __ ldrb(R1_tmp, at_bcp(2));
  __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
}


void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label fastCase, Condy, Done;

  const Register Rindex   = R1_tmp;
  const Register Rcpool   = R2_tmp;
  const Register Rtags    = R3_tmp;
  const Register RtagType = R3_tmp;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
  } else {
    __ ldrb(Rindex, at_bcp(1));
  }
  __ get_cpool_and_tags(Rcpool, Rtags);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get const type
  __ add(Rtemp, Rtags, tags_offset);
#ifdef AARCH64
  __ add(Rtemp, Rtemp, Rindex);
  __ ldarb(RtagType, Rtemp);  // TODO-AARCH64 figure out if barrier is needed here, or control dependency is enough
#else
  __ ldrb(RtagType, Address(Rtemp, Rindex));
  volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
#endif // AARCH64

  // unresolved class - get the resolved class
  __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);

  // unresolved class in error (resolution failed) - call into runtime
  // so that the same error from first resolution attempt is thrown.
#ifdef AARCH64
  __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint
  __ cond_cmp(RtagType, Rtemp, ne);
#else
  __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
#endif // AARCH64

  // resolved class - need to call vm to get java mirror of the class
  __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);

  __ b(fastCase, ne);

  // slow case - call runtime
  __ mov(R1, wide);
  call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
  __ push(atos);
  __ b(Done);

  // int, float, String
  __ bind(fastCase);

  __ cmp(RtagType, JVM_CONSTANT_Integer);
  __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
  __ b(Condy, ne);

  // itos, ftos
  __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
  __ ldr_u32(R0_tos, Address(Rtemp, base_offset));

  // floats and ints are placed on stack in the same way, so
  // we can use push(itos) to transfer float value without VFP
  __ push(itos);
  __ b(Done);

  __ bind(Condy);
  condy_helper(Done);

  __ bind(Done);
}

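// Note: in ldc() above, only JVM_CONSTANT_Integer and JVM_CONSTANT_Float are
// handled inline; any other tag reaching the fast case (e.g. a dynamically
// computed constant) is dispatched to condy_helper(), while class entries go
// through the runtime slow path to obtain the java mirror.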

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);
  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  assert_different_registers(R0_tos, R2_tmp);
  __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
  __ load_resolved_reference_at_index(R0_tos, R2_tmp);
  __ cbnz(R0_tos, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ mov(R1, (int)bytecode());
  __ call_VM(R0_tos, entry, R1);
  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, that already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;
    Register result = R0;
    Register tmp = R1;
    Register rarg = R2;

    // Stash null_sentinel address to get its value later
    __ mov_slow(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
    __ ldr(tmp, Address(rarg));
    __ cmp(result, tmp);
    __ b(notNull, ne);
    __ mov(result, 0);  // NULL object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    __ verify_oop(R0_tos);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  const Register Rtags  = R2_tmp;
  const Register Rindex = R3_tmp;
  const Register Rcpool = R4_tmp;
  const Register Rbase  = R5_tmp;

  __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);

  __ get_cpool_and_tags(Rcpool, Rtags);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));

  Label Condy, exit;
#ifdef __ABI_HARD__
  Label Long;
  // get type from tags
  __ add(Rtemp, Rtags, tags_offset);
  __ ldrb(Rtemp, Address(Rtemp, Rindex));
  __ cmp(Rtemp, JVM_CONSTANT_Double);
  __ b(Long, ne);
  __ ldr_double(D0_tos, Address(Rbase, base_offset));

  __ push(dtos);
  __ b(exit);
  __ bind(Long);
#endif

  __ cmp(Rtemp, JVM_CONSTANT_Long);
  __ b(Condy, ne);
#ifdef AARCH64
  __ ldr(R0_tos, Address(Rbase, base_offset));
#else
  __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
  __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
#endif // AARCH64
  __ push(ltos);
  __ b(exit);

  __ bind(Condy);
  condy_helper(exit);

  __ bind(exit);
}


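// condy_helper() pushes a constant resolved by InterpreterRuntime::resolve_ldc.
// The VM returns the base address of the value, plus a CPCE::_flags-formatted
// word from which the field offset and the tos state (result type) are
// decoded below.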
void TemplateTable::condy_helper(Label& Done)
{
  Register obj   = R0_tmp;
  Register rtmp  = R1_tmp;
  Register flags = R2_tmp;
  Register off   = R3_tmp;

  __ mov(rtmp, (int) bytecode());
  __ call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rtmp);
  __ get_vm_result_2(flags, rtmp);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ mov(off, flags);

#ifdef AARCH64
  __ andr(off, off, (unsigned)ConstantPoolCacheEntry::field_index_mask);
#else
  __ logical_shift_left( off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
  __ logical_shift_right(off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
#endif

  const Address field(obj, off);

  __ logical_shift_right(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notIntFloat, notShort, notByte, notChar, notBool;
      __ cmp(flags, itos);
      __ cond_cmp(flags, ftos, ne);
      __ b(notIntFloat, ne);
      __ ldr(R0_tos, field);
      __ push(itos);
      __ b(Done);

      __ bind(notIntFloat);
      __ cmp(flags, stos);
      __ b(notShort, ne);
      __ ldrsh(R0_tos, field);
      __ push(stos);
      __ b(Done);

      __ bind(notShort);
      __ cmp(flags, btos);
      __ b(notByte, ne);
      __ ldrsb(R0_tos, field);
      __ push(btos);
      __ b(Done);

      __ bind(notByte);
      __ cmp(flags, ctos);
      __ b(notChar, ne);
      __ ldrh(R0_tos, field);
      __ push(ctos);
      __ b(Done);

      __ bind(notChar);
      __ cmp(flags, ztos);
      __ b(notBool, ne);
      __ ldrsb(R0_tos, field);
      __ push(ztos);
      __ b(Done);

      __ bind(notBool);
      break;
    }

  case Bytecodes::_ldc2_w:
    {
      Label notLongDouble;
      __ cmp(flags, ltos);
      __ cond_cmp(flags, dtos, ne);
      __ b(notLongDouble, ne);

#ifdef AARCH64
      __ ldr(R0_tos, field);
#else
      __ add(rtmp, obj, wordSize);
      __ ldr(R0_tos_lo, Address(obj, off));
      __ ldr(R1_tos_hi, Address(rtmp, off));
#endif
      __ push(ltos);
      __ b(Done);

      __ bind(notLongDouble);

      break;
    }

  default:
    ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ ldrb(reg, at_bcp(offset));
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);

  if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
    Label rewrite, done;
    const Register next_bytecode   = R1_tmp;
    const Register target_bytecode = R2_tmp;

    // get next byte
    __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmp(next_bytecode, Bytecodes::_iload);
    __ b(done, eq);

    __ cmp(next_bytecode, Bytecodes::_fast_iload);
    __ mov(target_bytecode, Bytecodes::_fast_iload2);
    __ b(rewrite, eq);

    // if _caload, rewrite to fast_icaload
    __ cmp(next_bytecode, Bytecodes::_caload);
    __ mov(target_bytecode, Bytecodes::_fast_icaload);
    __ b(rewrite, eq);

    // rewrite so iload doesn't check again.
    __ mov(target_bytecode, Bytecodes::_fast_iload);

    // rewrite
    // R2: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
    __ bind(done);
  }

  // Get the local value into tos
  const Register Rlocal_index = R1_tmp;
  locals_index(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(R0_tos, local);
}


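// fast_iload2 executes the two iloads of a rewritten iload pair: the first
// value is pushed onto the expression stack and the second is left in tos.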
void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  const Register Rlocal_index = R1_tmp;

  locals_index(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(R0_tos, local);
  __ push(itos);

  locals_index(Rlocal_index, 3);
  local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(R0_tos, local);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  const Register Rlocal_index = R1_tmp;

  locals_index(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(R0_tos, local);
}


void TemplateTable::lload() {
  transition(vtos, ltos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);
  load_category2_local(Rlocal_index, R3_tmp);
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  const Register Rlocal_index = R2_tmp;

  // Get the local value into tos
  locals_index(Rlocal_index);
  Address local = load_faddress(Rlocal_index, Rtemp);
#ifdef __SOFTFP__
  __ ldr(R0_tos, local);
#else
  __ ldr_float(S0_tos, local);
#endif // __SOFTFP__
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);

#ifdef __SOFTFP__
  load_category2_local(Rlocal_index, R3_tmp);
#else
  __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
#endif // __SOFTFP__
}


void TemplateTable::aload() {
  transition(vtos, atos);
  const Register Rlocal_index = R1_tmp;

  locals_index(Rlocal_index);
  Address local = load_aaddress(Rlocal_index, Rtemp);
  __ ldr(R0_tos, local);
}


void TemplateTable::locals_index_wide(Register reg) {
  assert_different_registers(reg, Rtemp);
  __ ldrb(Rtemp, at_bcp(2));
  __ ldrb(reg, at_bcp(3));
  __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
}


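// Note: as required by the JVM specification's wide format, the 16-bit local
// index is assembled big-endian: the byte at bcp+2 is the high byte and the
// byte at bcp+3 is the low byte (see locals_index_wide above).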
void TemplateTable::wide_iload() {
  transition(vtos, itos);
  const Register Rlocal_index = R2_tmp;

  locals_index_wide(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(R0_tos, local);
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  const Register Rlocal_index = R2_tmp;
  const Register Rlocal_base = R3_tmp;

  locals_index_wide(Rlocal_index);
  load_category2_local(Rlocal_index, R3_tmp);
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  const Register Rlocal_index = R2_tmp;

  locals_index_wide(Rlocal_index);
  Address local = load_faddress(Rlocal_index, Rtemp);
#ifdef __SOFTFP__
  __ ldr(R0_tos, local);
#else
  __ ldr_float(S0_tos, local);
#endif // __SOFTFP__
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  const Register Rlocal_index = R2_tmp;

  locals_index_wide(Rlocal_index);
#ifdef __SOFTFP__
  load_category2_local(Rlocal_index, R3_tmp);
#else
  __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
#endif // __SOFTFP__
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  const Register Rlocal_index = R2_tmp;

  locals_index_wide(Rlocal_index);
  Address local = load_aaddress(Rlocal_index, Rtemp);
  __ ldr(R0_tos, local);
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  assert_different_registers(array, index, Rtemp);
  // check array
  __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
  // check index
  __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ cmp_32(index, Rtemp);
  if (index != R4_ArrayIndexOutOfBounds_index) {
    // convention with generate_ArrayIndexOutOfBounds_handler()
    __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
  }
  __ mov(R1, array, hs);
  __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);
  Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);

#ifdef AARCH64
  __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
#else
  Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg);
#endif // AARCH64
}


void TemplateTable::faload() {
  transition(itos, ftos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);

  Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);

  Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);
  do_oop_load(_masm, R0_tos, get_array_elem_addr_same_base(T_OBJECT, Rarray, Rindex, Rtemp), IS_ARRAY);
}


void TemplateTable::baload() {
  transition(itos, itos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);
  Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
}


void TemplateTable::caload() {
  transition(itos, itos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);
  Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
}


// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  const Register Rlocal_index = R1_tmp;
  const Register Rarray = R1_tmp;
  const Register Rindex = R4_tmp; // index_check prefers index on R4
  assert_different_registers(Rlocal_index, Rindex);
  assert_different_registers(Rarray, Rindex);

  // load index out of locals
  locals_index(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(Rindex, local);

  // get array element
  index_check(Rarray, Rindex);
  Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
}


void TemplateTable::saload() {
  transition(itos, itos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);
  Address addr = get_array_elem_addr_same_base(T_SHORT, Rarray, Rindex, Rtemp);
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ldr_s32(R0_tos, iaddress(n));
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
#ifdef AARCH64
  __ ldr(R0_tos, laddress(n));
#else
  __ ldr(R0_tos_lo, laddress(n));
  __ ldr(R1_tos_hi, haddress(n));
#endif // AARCH64
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
#ifdef __SOFTFP__
  __ ldr(R0_tos, faddress(n));
#else
  __ ldr_float(S0_tos, faddress(n));
#endif // __SOFTFP__
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
#ifdef __SOFTFP__
  __ ldr(R0_tos_lo, laddress(n));
  __ ldr(R1_tos_hi, haddress(n));
#else
  __ ldr_double(D0_tos, daddress(n));
#endif // __SOFTFP__
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ldr(R0_tos, aaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks if the next bytecode is either _fast_igetfield,
  // _fast_agetfield or _fast_fgetfield and then rewrites the
  // current bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be delayed,
  //       otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes, requiring only a small amount of code, are the most
  // profitable to rewrite.
  if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
    Label rewrite, done;
    const Register next_bytecode   = R1_tmp;
    const Register target_bytecode = R2_tmp;

    // get next byte
    __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmp(next_bytecode, Bytecodes::_getfield);
    __ b(done, eq);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
    __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
    __ b(rewrite, eq);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
    __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
    __ b(rewrite, eq);

    // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");

    __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
#ifdef AARCH64
    __ mov(Rtemp, Bytecodes::_fast_faccess_0);
    __ mov(target_bytecode, Bytecodes::_fast_aload_0);
    __ mov(target_bytecode, Rtemp, eq);
#else
    __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
    __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
#endif // AARCH64

    // rewrite
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);

    __ bind(done);
  }

  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ str_32(R0_tos, local);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);
  store_category2_local(Rlocal_index, R3_tmp);
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);
  Address local = load_faddress(Rlocal_index, Rtemp);
#ifdef __SOFTFP__
  __ str(R0_tos, local);
#else
  __ str_float(S0_tos, local);
#endif // __SOFTFP__
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);

#ifdef __SOFTFP__
  store_category2_local(Rlocal_index, R3_tmp);
#else
  __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
#endif // __SOFTFP__
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  const Register Rlocal_index = R1_tmp;

  __ pop_ptr(R0_tos);
  locals_index(Rlocal_index);
  Address local = load_aaddress(Rlocal_index, Rtemp);
  __ str(R0_tos, local);
}


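// The wide store templates follow. Note that wide_fstore and wide_dstore
// simply delegate to wide_istore and wide_lstore: the value is popped and
// stored as a raw bit pattern, which is sufficient because no format
// conversion is involved.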
void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  const Register Rlocal_index = R2_tmp;

  __ pop_i(R0_tos);
  locals_index_wide(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ str_32(R0_tos, local);
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  const Register Rlocal_index = R2_tmp;
  const Register Rlocal_base = R3_tmp;

#ifdef AARCH64
  __ pop_l(R0_tos);
#else
  __ pop_l(R0_tos_lo, R1_tos_hi);
#endif // AARCH64

  locals_index_wide(Rlocal_index);
  store_category2_local(Rlocal_index, R3_tmp);
}


void TemplateTable::wide_fstore() {
  wide_istore();
}


void TemplateTable::wide_dstore() {
  wide_lstore();
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  const Register Rlocal_index = R2_tmp;

  __ pop_ptr(R0_tos);
  locals_index_wide(Rlocal_index);
  Address local = load_aaddress(Rlocal_index, Rtemp);
  __ str(R0_tos, local);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // R0_tos: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);
  Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // R0_tos_lo:R1_tos_hi: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);

#ifdef AARCH64
  __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
#else
  Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg, false);
#endif // AARCH64
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // S0_tos/R0_tos: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);
  Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg, false);
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // D0_tos / R0_tos_lo:R1_tos_hi: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);

  Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg, false);
}


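// aastore must perform the array store check: with the stack still holding
// (array, index, value), a null value is stored directly (after profiling),
// while a non-null value requires a subtype check of the value's klass
// against the array's element klass before the barriered oop store.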
void TemplateTable::aastore() {
  transition(vtos, vtos);
  Label is_null, throw_array_store, done;

  const Register Raddr_1   = R1_tmp;
  const Register Rvalue_2  = R2_tmp;
  const Register Rarray_3  = R3_tmp;
  const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
  const Register Rsub_5    = R5_tmp;
  const Register Rsuper_LR = LR_tmp;

  // stack: ..., array, index, value
  __ ldr(Rvalue_2, at_tos());        // Value
  __ ldr_s32(Rindex_4, at_tos_p1()); // Index
  __ ldr(Rarray_3, at_tos_p2());     // Array

  index_check_without_pop(Rarray_3, Rindex_4);

  // Compute the array base
  __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // do array store check - check for NULL value first
  __ cbz(Rvalue_2, is_null);

  // Load subklass
  __ load_klass(Rsub_5, Rvalue_2);
  // Load superklass
  __ load_klass(Rtemp, Rarray_3);
  __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));

  __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
  // Come here on success

  // Store value
  __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));

  // Now store using the appropriate barrier
  do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IS_ARRAY);
  __ b(done);

  __ bind(throw_array_store);

  // Come here on failure of subtype check
  __ profile_typecheck_failed(R0_tmp);

  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Have a NULL in Rvalue_2, store NULL at array[index].
  __ bind(is_null);
  __ profile_null_seen(R0_tmp);

  // Store a NULL
  do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IS_ARRAY);

  // Pop stack arguments
  __ bind(done);
  __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // R0_tos: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(Rtemp, Rarray);
  __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
  Label L_skip;
  __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
  __ b(L_skip, eq);
  __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
}


void TemplateTable::castore() {
  transition(itos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // R0_tos: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);
  Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
}


void TemplateTable::sastore() {
  assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
           arrayOopDesc::base_offset_in_bytes(T_SHORT),
         "base offsets for char and short should be equal");
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ str_32(R0_tos, iaddress(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
#ifdef AARCH64
  __ str(R0_tos, laddress(n));
#else
  __ str(R0_tos_lo, laddress(n));
  __ str(R1_tos_hi, haddress(n));
#endif // AARCH64
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
#ifdef __SOFTFP__
  __ str(R0_tos, faddress(n));
#else
  __ str_float(S0_tos, faddress(n));
#endif // __SOFTFP__
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
#ifdef __SOFTFP__
  __ str(R0_tos_lo, laddress(n));
  __ str(R1_tos_hi, haddress(n));
#else
  __ str_double(D0_tos, daddress(n));
#endif // __SOFTFP__
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(R0_tos);
  __ str(R0_tos, aaddress(n));
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  __ load_ptr(0, R0_tmp);
  __ push_ptr(R0_tmp);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(0, R0_tmp);  // load b
  __ load_ptr(1, R2_tmp);  // load a
  __ store_ptr(1, R0_tmp); // store b
  __ store_ptr(0, R2_tmp); // store a
  __ push_ptr(R0_tmp);     // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr(0, R0_tmp);  // load c
  __ load_ptr(1, R2_tmp);  // load b
  __ load_ptr(2, R4_tmp);  // load a

  __ push_ptr(R0_tmp);     // push c

  // stack: ..., a, b, c, c
  __ store_ptr(1, R2_tmp); // store b
  __ store_ptr(2, R4_tmp); // store a
  __ store_ptr(3, R0_tmp); // store c
  // stack: ..., c, a, b, c
}


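// Note: the stack-manipulation templates operate on untyped expression-stack
// slots via load_ptr/store_ptr. dup2 below therefore duplicates either two
// category-1 values or one category-2 value without distinguishing the cases.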
void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, R0_tmp);  // load a
  __ push_ptr(R0_tmp);     // push a
  __ load_ptr(1, R0_tmp);  // load b
  __ push_ptr(R0_tmp);     // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  // stack: ..., a, b, c
  __ load_ptr(0, R4_tmp);  // load c
  __ load_ptr(1, R2_tmp);  // load b
  __ load_ptr(2, R0_tmp);  // load a

  __ push_ptr(R2_tmp);     // push b
  __ push_ptr(R4_tmp);     // push c

  // stack: ..., a, b, c, b, c

  __ store_ptr(2, R0_tmp); // store a
  __ store_ptr(3, R4_tmp); // store c
  __ store_ptr(4, R2_tmp); // store b

  // stack: ..., b, c, a, b, c
}


void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr(0, R0_tmp);  // load d
  __ load_ptr(1, R2_tmp);  // load c
  __ push_ptr(R2_tmp);     // push c
  __ push_ptr(R0_tmp);     // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr(4, R4_tmp);  // load b
  __ store_ptr(4, R0_tmp); // store d in b
  __ store_ptr(2, R4_tmp); // store b in d
  // stack: ..., a, d, c, b, c, d
  __ load_ptr(5, R4_tmp);  // load a
  __ store_ptr(5, R2_tmp); // store c in a
  __ store_ptr(3, R4_tmp); // store a in c
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, R0_tmp);  // load a
  __ load_ptr(0, R2_tmp);  // load b
  __ store_ptr(0, R0_tmp); // store a in b
  __ store_ptr(1, R2_tmp); // store b in a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  const Register arg1 = R1_tmp;
  const Register arg2 = R0_tos;

  __ pop_i(arg1);
  switch (op) {
    case add  : __ add_32 (R0_tos, arg1, arg2); break;
    case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
    case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
    case _and : __ and_32 (R0_tos, arg1, arg2); break;
    case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
    case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
#ifdef AARCH64
    case shl  : __ lslv_w (R0_tos, arg1, arg2); break;
    case shr  : __ asrv_w (R0_tos, arg1, arg2); break;
    case ushr : __ lsrv_w (R0_tos, arg1, arg2); break;
#else
    case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
    case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
    case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
#endif // AARCH64
    default   : ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
#ifdef AARCH64
  const Register arg1 = R1_tmp;
  const Register arg2 = R0_tos;

  __ pop_l(arg1);
  switch (op) {
    case add  : __ add (R0_tos, arg1, arg2); break;
    case sub  : __ sub (R0_tos, arg1, arg2); break;
    case _and : __ andr(R0_tos, arg1, arg2); break;
    case _or  : __ orr (R0_tos, arg1, arg2); break;
    case _xor : __ eor (R0_tos, arg1, arg2); break;
    default   : ShouldNotReachHere();
  }
#else
  const Register arg1_lo = R2_tmp;
  const Register arg1_hi = R3_tmp;
  const Register arg2_lo = R0_tos_lo;
  const Register arg2_hi = R1_tos_hi;

  __ pop_l(arg1_lo, arg1_hi);
  switch (op) {
    case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
    case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
    case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
    case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
    case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
    default  : ShouldNotReachHere();
  }
#endif // AARCH64
}


void TemplateTable::idiv() {
  transition(itos, itos);
#ifdef AARCH64
  const Register divisor  = R0_tos;
  const Register dividend = R1_tmp;

  __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
  __ pop_i(dividend);
  __ sdiv_w(R0_tos, dividend, divisor);
#else
  __ mov(R2, R0_tos);
  __ pop_i(R0);
  // R0 - dividend
  // R2 - divisor
  __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
  // R1 - result
  __ mov(R0_tos, R1);
#endif // AARCH64
}


void TemplateTable::irem() {
  transition(itos, itos);
#ifdef AARCH64
  const Register divisor  = R0_tos;
  const Register dividend = R1_tmp;
  const Register quotient = R2_tmp;

  __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
  __ pop_i(dividend);
  __ sdiv_w(quotient, dividend, divisor);
  __ msub_w(R0_tos, divisor, quotient, dividend);
#else
  __ mov(R2, R0_tos);
  __ pop_i(R0);
  // R0 - dividend
  // R2 - divisor
  __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
  // R0 - remainder
#endif // AARCH64
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
#ifdef AARCH64
  const Register arg1 = R0_tos;
  const Register arg2 = R1_tmp;

  __ pop_l(arg2);
  __ mul(R0_tos, arg1, arg2);
#else
  const Register arg1_lo = R0_tos_lo;
  const Register arg1_hi = R1_tos_hi;
  const Register arg2_lo = R2_tmp;
  const Register arg2_hi = R3_tmp;

  __ pop_l(arg2_lo, arg2_hi);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
#endif // AARCH64
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);
#ifdef AARCH64
  const Register divisor  = R0_tos;
  const Register dividend = R1_tmp;

  __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
  __ pop_l(dividend);
  __ sdiv(R0_tos, dividend, divisor);
#else
  const Register x_lo = R2_tmp;
  const Register x_hi = R3_tmp;
  const Register y_lo = R0_tos_lo;
  const Register y_hi = R1_tos_hi;

  __ pop_l(x_lo, x_hi);

  // check if y = 0
  __ orrs(Rtemp, y_lo, y_hi);
  __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
#endif // AARCH64
}


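// Note: in the 32-bit paths of ldiv above and lrem below, the divisor-zero
// check ORs the two halves of the 64-bit divisor with orrs; the flags are
// zero only if the whole divisor is zero, in which case ArithmeticException
// is raised before calling the SharedRuntime helper.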
void TemplateTable::lrem() {
  transition(ltos, ltos);
#ifdef AARCH64
  const Register divisor  = R0_tos;
  const Register dividend = R1_tmp;
  const Register quotient = R2_tmp;

  __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
  __ pop_l(dividend);
  __ sdiv(quotient, dividend, divisor);
  __ msub(R0_tos, divisor, quotient, dividend);
#else
  const Register x_lo = R2_tmp;
  const Register x_hi = R3_tmp;
  const Register y_lo = R0_tos_lo;
  const Register y_hi = R1_tos_hi;

  __ pop_l(x_lo, x_hi);

  // check if y = 0
  __ orrs(Rtemp, y_lo, y_hi);
  __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
#endif // AARCH64
}


void TemplateTable::lshl() {
  transition(itos, ltos);
#ifdef AARCH64
  const Register val = R1_tmp;
  const Register shift_cnt = R0_tos;
  __ pop_l(val);
  __ lslv(R0_tos, val, shift_cnt);
#else
  const Register shift_cnt = R4_tmp;
  const Register val_lo = R2_tmp;
  const Register val_hi = R3_tmp;

  __ pop_l(val_lo, val_hi);
  __ andr(shift_cnt, R0_tos, 63);
  __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
#endif // AARCH64
}


void TemplateTable::lshr() {
  transition(itos, ltos);
#ifdef AARCH64
  const Register val = R1_tmp;
  const Register shift_cnt = R0_tos;
  __ pop_l(val);
  __ asrv(R0_tos, val, shift_cnt);
#else
  const Register shift_cnt = R4_tmp;
  const Register val_lo = R2_tmp;
  const Register val_hi = R3_tmp;

  __ pop_l(val_lo, val_hi);
  __ andr(shift_cnt, R0_tos, 63);
  __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
#endif // AARCH64
}


void TemplateTable::lushr() {
  transition(itos, ltos);
#ifdef AARCH64
  const Register val = R1_tmp;
  const Register shift_cnt = R0_tos;
  __ pop_l(val);
  __ lsrv(R0_tos, val, shift_cnt);
#else
  const Register shift_cnt = R4_tmp;
  const Register val_lo = R2_tmp;
  const Register val_hi = R3_tmp;

  __ pop_l(val_lo, val_hi);
  __ andr(shift_cnt, R0_tos, 63);
  __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
#endif // AARCH64
}

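// Note: as specified for the long shift bytecodes, only the low six bits of
// the shift count are significant, hence the andr(shift_cnt, R0_tos, 63) in
// the 32-bit paths above; the AArch64 lslv/asrv/lsrv instructions mask the
// count implicitly.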

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
#ifdef __SOFTFP__
  __ mov(R1, R0_tos);
  __ pop_i(R0);
  switch (op) {
    case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
    case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
    case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
    case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
    case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
    default : ShouldNotReachHere();
  }
#else
  const FloatRegister arg1 = S1_tmp;
  const FloatRegister arg2 = S0_tos;

  switch (op) {
    case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
    case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
    case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
    case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
    case rem:
#ifndef __ABI_HARD__
      __ pop_f(arg1);
      __ fmrs(R0, arg1);
      __ fmrs(R1, arg2);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
      __ fmsr(S0_tos, R0);
#else
      __ mov_float(S1_reg, arg2);
      __ pop_f(S0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
#endif // !__ABI_HARD__
      break;
    default : ShouldNotReachHere();
  }
#endif // __SOFTFP__
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
#ifdef __SOFTFP__
  __ mov(R2, R0_tos_lo);
  __ mov(R3, R1_tos_hi);
  __ pop_l(R0, R1);
  switch (op) {
    // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
    case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
    case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
    case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
    case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
    case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
    default : ShouldNotReachHere();
  }
#else
  const FloatRegister arg1 = D1_tmp;
  const FloatRegister arg2 = D0_tos;

  switch (op) {
    case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
    case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
    case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
    case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
    case rem:
#ifndef __ABI_HARD__
      __ pop_d(arg1);
      __ fmrrd(R0, R1, arg1);
      __ fmrrd(R2, R3, arg2);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
      __ fmdrr(D0_tos, R0, R1);
#else
      __ mov_double(D1, arg2);
      __ pop_d(D0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
#endif // !__ABI_HARD__
      break;
    default : ShouldNotReachHere();
  }
#endif // __SOFTFP__
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg_32(R0_tos, R0_tos);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef AARCH64
  __ neg(R0_tos, R0_tos);
#else
  __ rsbs(R0_tos_lo, R0_tos_lo, 0);
  __ rsc (R1_tos_hi, R1_tos_hi, 0);
#endif // AARCH64
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
#ifdef __SOFTFP__
  // Invert sign bit
  const int sign_mask = 0x80000000;
  __ eor(R0_tos, R0_tos, sign_mask);
#else
  __ neg_float(S0_tos, S0_tos);
#endif // __SOFTFP__
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
#ifdef __SOFTFP__
  // Invert sign bit in the high part of the double
  const int sign_mask_hi = 0x80000000;
  __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
#else
  __ neg_double(D0_tos, D0_tos);
#endif // __SOFTFP__
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  const Register Rconst       = R2_tmp;
  const Register Rlocal_index = R1_tmp;
  const Register Rval         = R0_tmp;

  __ ldrsb(Rconst, at_bcp(2));
  locals_index(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(Rval, local);
  __ add(Rval, Rval, Rconst);
  __ str_32(Rval, local);
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  const Register Rconst       = R2_tmp;
  const Register Rlocal_index = R1_tmp;
  const Register Rval         = R0_tmp;

  // get constant in Rconst
  __ ldrsb(R2_tmp, at_bcp(4));
  __ ldrb(R3_tmp, at_bcp(5));
  __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));

  locals_index_wide(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(Rval, local);
  __ add(Rval, Rval, Rconst);
  __ str_32(Rval, local);
}
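
// Note: iinc reads a signed 8-bit increment at bcp+2; wide_iinc assembles a
// signed 16-bit increment from bcp+4 (high byte, sign-extended) and bcp+5
// (low byte), again in the spec's big-endian operand order.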
Rconst); 1942 __ str_32(Rval, local); 1943 } 1944 1945 1946 void TemplateTable::convert() { 1947 // Checking 1948 #ifdef ASSERT 1949 { TosState tos_in = ilgl; 1950 TosState tos_out = ilgl; 1951 switch (bytecode()) { 1952 case Bytecodes::_i2l: // fall through 1953 case Bytecodes::_i2f: // fall through 1954 case Bytecodes::_i2d: // fall through 1955 case Bytecodes::_i2b: // fall through 1956 case Bytecodes::_i2c: // fall through 1957 case Bytecodes::_i2s: tos_in = itos; break; 1958 case Bytecodes::_l2i: // fall through 1959 case Bytecodes::_l2f: // fall through 1960 case Bytecodes::_l2d: tos_in = ltos; break; 1961 case Bytecodes::_f2i: // fall through 1962 case Bytecodes::_f2l: // fall through 1963 case Bytecodes::_f2d: tos_in = ftos; break; 1964 case Bytecodes::_d2i: // fall through 1965 case Bytecodes::_d2l: // fall through 1966 case Bytecodes::_d2f: tos_in = dtos; break; 1967 default : ShouldNotReachHere(); 1968 } 1969 switch (bytecode()) { 1970 case Bytecodes::_l2i: // fall through 1971 case Bytecodes::_f2i: // fall through 1972 case Bytecodes::_d2i: // fall through 1973 case Bytecodes::_i2b: // fall through 1974 case Bytecodes::_i2c: // fall through 1975 case Bytecodes::_i2s: tos_out = itos; break; 1976 case Bytecodes::_i2l: // fall through 1977 case Bytecodes::_f2l: // fall through 1978 case Bytecodes::_d2l: tos_out = ltos; break; 1979 case Bytecodes::_i2f: // fall through 1980 case Bytecodes::_l2f: // fall through 1981 case Bytecodes::_d2f: tos_out = ftos; break; 1982 case Bytecodes::_i2d: // fall through 1983 case Bytecodes::_l2d: // fall through 1984 case Bytecodes::_f2d: tos_out = dtos; break; 1985 default : ShouldNotReachHere(); 1986 } 1987 transition(tos_in, tos_out); 1988 } 1989 #endif // ASSERT 1990 1991 // Conversion 1992 switch (bytecode()) { 1993 case Bytecodes::_i2l: 1994 #ifdef AARCH64 1995 __ sign_extend(R0_tos, R0_tos, 32); 1996 #else 1997 __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1)); 1998 #endif // AARCH64 1999 break; 2000 2001 case Bytecodes::_i2f: 2002 #ifdef AARCH64 2003 __ scvtf_sw(S0_tos, R0_tos); 2004 #else 2005 #ifdef __SOFTFP__ 2006 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos); 2007 #else 2008 __ fmsr(S0_tmp, R0_tos); 2009 __ fsitos(S0_tos, S0_tmp); 2010 #endif // __SOFTFP__ 2011 #endif // AARCH64 2012 break; 2013 2014 case Bytecodes::_i2d: 2015 #ifdef AARCH64 2016 __ scvtf_dw(D0_tos, R0_tos); 2017 #else 2018 #ifdef __SOFTFP__ 2019 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos); 2020 #else 2021 __ fmsr(S0_tmp, R0_tos); 2022 __ fsitod(D0_tos, S0_tmp); 2023 #endif // __SOFTFP__ 2024 #endif // AARCH64 2025 break; 2026 2027 case Bytecodes::_i2b: 2028 __ sign_extend(R0_tos, R0_tos, 8); 2029 break; 2030 2031 case Bytecodes::_i2c: 2032 __ zero_extend(R0_tos, R0_tos, 16); 2033 break; 2034 2035 case Bytecodes::_i2s: 2036 __ sign_extend(R0_tos, R0_tos, 16); 2037 break; 2038 2039 case Bytecodes::_l2i: 2040 /* nothing to do */ 2041 break; 2042 2043 case Bytecodes::_l2f: 2044 #ifdef AARCH64 2045 __ scvtf_sx(S0_tos, R0_tos); 2046 #else 2047 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi); 2048 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__) 2049 __ fmsr(S0_tos, R0); 2050 #endif // !__SOFTFP__ && !__ABI_HARD__ 2051 #endif // AARCH64 2052 break; 2053 2054 case Bytecodes::_l2d: 2055 #ifdef AARCH64 2056 __ scvtf_dx(D0_tos, R0_tos); 2057 #else 2058 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi); 2059 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__) 2060 
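// softfp ABI with VFP hardware (neither __SOFTFP__ nor __ABI_HARD__ defined): the runtime call above returned the double in core registers R0/R1, so move it into D0_tos.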
__ fmdrr(D0_tos, R0, R1); 2061 #endif // !__SOFTFP__ && !__ABI_HARD__ 2062 #endif // AARCH64 2063 break; 2064 2065 case Bytecodes::_f2i: 2066 #ifdef AARCH64 2067 __ fcvtzs_ws(R0_tos, S0_tos); 2068 #else 2069 #ifndef __SOFTFP__ 2070 __ ftosizs(S0_tos, S0_tos); 2071 __ fmrs(R0_tos, S0_tos); 2072 #else 2073 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos); 2074 #endif // !__SOFTFP__ 2075 #endif // AARCH64 2076 break; 2077 2078 case Bytecodes::_f2l: 2079 #ifdef AARCH64 2080 __ fcvtzs_xs(R0_tos, S0_tos); 2081 #else 2082 #ifndef __SOFTFP__ 2083 __ fmrs(R0_tos, S0_tos); 2084 #endif // !__SOFTFP__ 2085 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos); 2086 #endif // AARCH64 2087 break; 2088 2089 case Bytecodes::_f2d: 2090 #ifdef __SOFTFP__ 2091 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos); 2092 #else 2093 __ convert_f2d(D0_tos, S0_tos); 2094 #endif // __SOFTFP__ 2095 break; 2096 2097 case Bytecodes::_d2i: 2098 #ifdef AARCH64 2099 __ fcvtzs_wd(R0_tos, D0_tos); 2100 #else 2101 #ifndef __SOFTFP__ 2102 __ ftosizd(Stemp, D0); 2103 __ fmrs(R0, Stemp); 2104 #else 2105 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi); 2106 #endif // !__SOFTFP__ 2107 #endif // AARCH64 2108 break; 2109 2110 case Bytecodes::_d2l: 2111 #ifdef AARCH64 2112 __ fcvtzs_xd(R0_tos, D0_tos); 2113 #else 2114 #ifndef __SOFTFP__ 2115 __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos); 2116 #endif // !__SOFTFP__ 2117 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi); 2118 #endif // AARCH64 2119 break; 2120 2121 case Bytecodes::_d2f: 2122 #ifdef __SOFTFP__ 2123 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi); 2124 #else 2125 __ convert_d2f(S0_tos, D0_tos); 2126 #endif // __SOFTFP__ 2127 break; 2128 2129 default: 2130 ShouldNotReachHere(); 2131 } 2132 } 2133 2134 2135 void TemplateTable::lcmp() { 2136 transition(ltos, itos); 2137 #ifdef AARCH64 2138 const Register arg1 = R1_tmp; 2139 const Register arg2 = R0_tos; 2140 2141 __ pop_l(arg1); 2142 2143 __ cmp(arg1, arg2); 2144 __ cset(R0_tos, gt); // 1 if '>', else 0 2145 __ csinv(R0_tos, R0_tos, ZR, ge); // previous value if '>=', else -1 2146 #else 2147 const Register arg1_lo = R2_tmp; 2148 const Register arg1_hi = R3_tmp; 2149 const Register arg2_lo = R0_tos_lo; 2150 const Register arg2_hi = R1_tos_hi; 2151 const Register res = R4_tmp; 2152 2153 __ pop_l(arg1_lo, arg1_hi); 2154 2155 // long compare arg1 with arg2 2156 // result is -1/0/+1 if '<'/'='/'>' 2157 Label done; 2158 2159 __ mov (res, 0); 2160 __ cmp (arg1_hi, arg2_hi); 2161 __ mvn (res, 0, lt); 2162 __ mov (res, 1, gt); 2163 __ b(done, ne); 2164 __ cmp (arg1_lo, arg2_lo); 2165 __ mvn (res, 0, lo); 2166 __ mov (res, 1, hi); 2167 __ bind(done); 2168 __ mov (R0_tos, res); 2169 #endif // AARCH64 2170 } 2171 2172 2173 void TemplateTable::float_cmp(bool is_float, int unordered_result) { 2174 assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result"); 2175 2176 #ifdef AARCH64 2177 if (is_float) { 2178 transition(ftos, itos); 2179 __ pop_f(S1_tmp); 2180 __ fcmp_s(S1_tmp, S0_tos); 2181 } else { 2182 transition(dtos, itos); 2183 __ pop_d(D1_tmp); 2184 __ fcmp_d(D1_tmp, D0_tos); 2185 } 2186 2187 if (unordered_result < 0) { 2188 __ cset(R0_tos, gt); // 1 if '>', else 0 2189 __ csinv(R0_tos, R0_tos, ZR, ge); // previous value if '>=', else -1 2190 } else { 2191 __ cset(R0_tos, hi); // 1 if '>' or unordered, else 0 2192 __ csinv(R0_tos, R0_tos, ZR, pl); // previous 
value if '>=' or unordered, else -1 2193 } 2194 2195 #else 2196 2197 #ifdef __SOFTFP__ 2198 2199 if (is_float) { 2200 transition(ftos, itos); 2201 const Register Rx = R0; 2202 const Register Ry = R1; 2203 2204 __ mov(Ry, R0_tos); 2205 __ pop_i(Rx); 2206 2207 if (unordered_result == 1) { 2208 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry); 2209 } else { 2210 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry); 2211 } 2212 2213 } else { 2214 2215 transition(dtos, itos); 2216 const Register Rx_lo = R0; 2217 const Register Rx_hi = R1; 2218 const Register Ry_lo = R2; 2219 const Register Ry_hi = R3; 2220 2221 __ mov(Ry_lo, R0_tos_lo); 2222 __ mov(Ry_hi, R1_tos_hi); 2223 __ pop_l(Rx_lo, Rx_hi); 2224 2225 if (unordered_result == 1) { 2226 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi); 2227 } else { 2228 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi); 2229 } 2230 } 2231 2232 #else 2233 2234 if (is_float) { 2235 transition(ftos, itos); 2236 __ pop_f(S1_tmp); 2237 __ fcmps(S1_tmp, S0_tos); 2238 } else { 2239 transition(dtos, itos); 2240 __ pop_d(D1_tmp); 2241 __ fcmpd(D1_tmp, D0_tos); 2242 } 2243 2244 __ fmstat(); 2245 2246 // comparison result | flag N | flag Z | flag C | flag V 2247 // "<" | 1 | 0 | 0 | 0 2248 // "==" | 0 | 1 | 1 | 0 2249 // ">" | 0 | 0 | 1 | 0 2250 // unordered | 0 | 0 | 1 | 1 2251 2252 if (unordered_result < 0) { 2253 __ mov(R0_tos, 1); // result == 1 if greater 2254 __ mvn(R0_tos, 0, lt); // result == -1 if less or unordered (N!=V) 2255 } else { 2256 __ mov(R0_tos, 1); // result == 1 if greater or unordered 2257 __ mvn(R0_tos, 0, mi); // result == -1 if less (N=1) 2258 } 2259 __ mov(R0_tos, 0, eq); // result == 0 if equal (Z=1) 2260 #endif // __SOFTFP__ 2261 #endif // AARCH64 2262 } 2263 2264 2265 void TemplateTable::branch(bool is_jsr, bool is_wide) { 2266 2267 const Register Rdisp = R0_tmp; 2268 const Register Rbumped_taken_count = R5_tmp; 2269 2270 __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count 2271 2272 const ByteSize be_offset = MethodCounters::backedge_counter_offset() + 2273 InvocationCounter::counter_offset(); 2274 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + 2275 InvocationCounter::counter_offset(); 2276 const int method_offset = frame::interpreter_frame_method_offset * wordSize; 2277 2278 // Load up R0 with the branch displacement 2279 if (is_wide) { 2280 __ ldrsb(R0_tmp, at_bcp(1)); 2281 __ ldrb(R1_tmp, at_bcp(2)); 2282 __ ldrb(R2_tmp, at_bcp(3)); 2283 __ ldrb(R3_tmp, at_bcp(4)); 2284 __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte)); 2285 __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte)); 2286 __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte)); 2287 } else { 2288 __ ldrsb(R0_tmp, at_bcp(1)); 2289 __ ldrb(R1_tmp, at_bcp(2)); 2290 __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte)); 2291 } 2292 2293 // Handle all the JSR stuff here, then exit. 2294 // It's much shorter and cleaner than intermingling with the 2295 // non-JSR normal-branch stuff occurring below. 2296 if (is_jsr) { 2297 // compute return address as bci in R1 2298 const Register Rret_addr = R1_tmp; 2299 assert_different_registers(Rdisp, Rret_addr, Rtemp); 2300 2301 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); 2302 __ sub(Rret_addr, Rbcp, - (is_wide ?
5 : 3) + in_bytes(ConstMethod::codes_offset())); 2303 __ sub(Rret_addr, Rret_addr, Rtemp); 2304 2305 // Load the next target bytecode into R3_bytecode and advance Rbcp 2306 #ifdef AARCH64 2307 __ add(Rbcp, Rbcp, Rdisp); 2308 __ ldrb(R3_bytecode, Address(Rbcp)); 2309 #else 2310 __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed)); 2311 #endif // AARCH64 2312 2313 // Push return address 2314 __ push_i(Rret_addr); 2315 // jsr returns vtos 2316 __ dispatch_only_noverify(vtos); 2317 return; 2318 } 2319 2320 // Normal (non-jsr) branch handling 2321 2322 // Adjust the bcp by the displacement in Rdisp and load next bytecode. 2323 #ifdef AARCH64 2324 __ add(Rbcp, Rbcp, Rdisp); 2325 __ ldrb(R3_bytecode, Address(Rbcp)); 2326 #else 2327 __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed)); 2328 #endif // AARCH64 2329 2330 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters"); 2331 Label backedge_counter_overflow; 2332 Label profile_method; 2333 Label dispatch; 2334 2335 if (UseLoopCounter) { 2336 // increment backedge counter for backward branches 2337 // Rdisp (R0): target offset 2338 2339 const Register Rcnt = R2_tmp; 2340 const Register Rcounters = R1_tmp; 2341 2342 // count only if backward branch 2343 #ifdef AARCH64 2344 __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM 2345 #else 2346 __ tst(Rdisp, Rdisp); 2347 __ b(dispatch, pl); 2348 #endif // AARCH64 2349 2350 if (TieredCompilation) { 2351 Label no_mdo; 2352 int increment = InvocationCounter::count_increment; 2353 if (ProfileInterpreter) { 2354 // Are we profiling? 2355 __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset())); 2356 __ cbz(Rtemp, no_mdo); 2357 // Increment the MDO backedge counter 2358 const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) + 2359 in_bytes(InvocationCounter::counter_offset())); 2360 const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset())); 2361 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, 2362 Rcnt, R4_tmp, eq, &backedge_counter_overflow); 2363 __ b(dispatch); 2364 } 2365 __ bind(no_mdo); 2366 // Increment backedge counter in MethodCounters* 2367 // Note Rbumped_taken_count is a callee-saved register on ARM32, but caller-saved on ARM64 2368 __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/, 2369 Rdisp, R3_bytecode, 2370 AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg)); 2371 const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset())); 2372 __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask, 2373 Rcnt, R4_tmp, eq, &backedge_counter_overflow); 2374 } else { 2375 // Increment backedge counter in MethodCounters* 2376 __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/, 2377 Rdisp, R3_bytecode, 2378 AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg)); 2379 __ ldr_u32(Rtemp, Address(Rcounters, be_offset)); // load backedge counter 2380 __ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter 2381 __ str_32(Rtemp, Address(Rcounters, be_offset)); // store counter 2382 2383 __ ldr_u32(Rcnt, Address(Rcounters, inv_offset)); // load invocation counter 2384 #ifdef AARCH64 2385 __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value); // mask out the status bits 2386 #else 2387 __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value); // mask out the status bits 2388 #endif // AARCH64 2389 __ add(Rcnt, Rcnt,
Rtemp); // add both counters 2390 2391 if (ProfileInterpreter) { 2392 // Test to see if we should create a method data oop 2393 const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset())); 2394 __ ldr_s32(Rtemp, profile_limit); 2395 __ cmp_32(Rcnt, Rtemp); 2396 __ b(dispatch, lt); 2397 2398 // if no method data exists, go to profile method 2399 __ test_method_data_pointer(R4_tmp, profile_method); 2400 2401 if (UseOnStackReplacement) { 2402 // check for overflow against Rbumped_taken_count, which is the MDO taken count 2403 const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())); 2404 __ ldr_s32(Rtemp, backward_branch_limit); 2405 __ cmp(Rbumped_taken_count, Rtemp); 2406 __ b(dispatch, lo); 2407 2408 // When ProfileInterpreter is on, the backedge_count comes from the 2409 // MethodData*, whose value does not get reset on the call to 2410 // frequency_counter_overflow(). To avoid excessive calls to the overflow 2411 // routine while the method is being compiled, add a second test to make 2412 // sure the overflow function is called only once every overflow_frequency. 2413 const int overflow_frequency = 1024; 2414 2415 #ifdef AARCH64 2416 __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1)); 2417 #else 2418 // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0 2419 assert(overflow_frequency == (1 << 10), "shift by 22 not correct for expected frequency"); 2420 __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22)); 2421 #endif // AARCH64 2422 2423 __ b(backedge_counter_overflow, eq); 2424 } 2425 } else { 2426 if (UseOnStackReplacement) { 2427 // check for overflow against Rcnt, which is the sum of the counters 2428 const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())); 2429 __ ldr_s32(Rtemp, backward_branch_limit); 2430 __ cmp_32(Rcnt, Rtemp); 2431 __ b(backedge_counter_overflow, hs); 2432 2433 } 2434 } 2435 } 2436 __ bind(dispatch); 2437 } 2438 2439 if (!UseOnStackReplacement) { 2440 __ bind(backedge_counter_overflow); 2441 } 2442 2443 // continue with the bytecode @ target 2444 __ dispatch_only(vtos); 2445 2446 if (UseLoopCounter) { 2447 if (ProfileInterpreter) { 2448 // Out-of-line code to allocate method data oop. 2449 __ bind(profile_method); 2450 2451 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); 2452 __ set_method_data_pointer_for_bcp(); 2453 // reload next bytecode 2454 __ ldrb(R3_bytecode, Address(Rbcp)); 2455 __ b(dispatch); 2456 } 2457 2458 if (UseOnStackReplacement) { 2459 // invocation counter overflow 2460 __ bind(backedge_counter_overflow); 2461 2462 __ sub(R1, Rbcp, Rdisp); // branch bcp 2463 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1); 2464 2465 // R0: osr nmethod (osr ok) or NULL (osr not possible) 2466 const Register Rnmethod = R0; 2467 2468 __ ldrb(R3_bytecode, Address(Rbcp)); // reload next bytecode 2469 2470 __ cbz(Rnmethod, dispatch); // test result, no osr if null 2471 2472 // nmethod may have been invalidated (VM may block upon call_VM return) 2473 __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset())); 2474 __ cmp(R1_tmp, nmethod::in_use); 2475 __ b(dispatch, ne); 2476 2477 // We have the address of an on-stack replacement routine in Rnmethod. 2478 // We need to prepare to execute the OSR method. First we must 2479 // migrate the locals and monitors off of the stack.
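// Sketch of the OSR hand-off performed below: OSR_migration_begin() packs the interpreter frame's locals and monitors into a buffer and returns its address in R0; the interpreter frame is then removed and control jumps to the nmethod's OSR entry point, which unpacks that buffer.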
2480 2481 __ mov(Rtmp_save0, Rnmethod); // save the nmethod 2482 2483 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); 2484 2485 // R0 is OSR buffer 2486 2487 __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset())); 2488 __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize)); 2489 2490 #ifdef AARCH64 2491 __ ldp(FP, LR, Address(FP)); 2492 __ mov(SP, Rtemp); 2493 #else 2494 __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR)); 2495 __ bic(SP, Rtemp, StackAlignmentInBytes - 1); // Remove frame and align stack 2496 #endif // AARCH64 2497 2498 __ jump(R1_tmp); 2499 } 2500 } 2501 } 2502 2503 2504 void TemplateTable::if_0cmp(Condition cc) { 2505 transition(itos, vtos); 2506 // assume branch is more often taken than not (loops use backward branches) 2507 Label not_taken; 2508 #ifdef AARCH64 2509 if (cc == equal) { 2510 __ cbnz_w(R0_tos, not_taken); 2511 } else if (cc == not_equal) { 2512 __ cbz_w(R0_tos, not_taken); 2513 } else { 2514 __ cmp_32(R0_tos, 0); 2515 __ b(not_taken, convNegCond(cc)); 2516 } 2517 #else 2518 __ cmp_32(R0_tos, 0); 2519 __ b(not_taken, convNegCond(cc)); 2520 #endif // AARCH64 2521 branch(false, false); 2522 __ bind(not_taken); 2523 __ profile_not_taken_branch(R0_tmp); 2524 } 2525 2526 2527 void TemplateTable::if_icmp(Condition cc) { 2528 transition(itos, vtos); 2529 // assume branch is more often taken than not (loops use backward branches) 2530 Label not_taken; 2531 __ pop_i(R1_tmp); 2532 __ cmp_32(R1_tmp, R0_tos); 2533 __ b(not_taken, convNegCond(cc)); 2534 branch(false, false); 2535 __ bind(not_taken); 2536 __ profile_not_taken_branch(R0_tmp); 2537 } 2538 2539 2540 void TemplateTable::if_nullcmp(Condition cc) { 2541 transition(atos, vtos); 2542 assert(cc == equal || cc == not_equal, "invalid condition"); 2543 2544 // assume branch is more often taken than not (loops use backward branches) 2545 Label not_taken; 2546 if (cc == equal) { 2547 __ cbnz(R0_tos, not_taken); 2548 } else { 2549 __ cbz(R0_tos, not_taken); 2550 } 2551 branch(false, false); 2552 __ bind(not_taken); 2553 __ profile_not_taken_branch(R0_tmp); 2554 } 2555 2556 2557 void TemplateTable::if_acmp(Condition cc) { 2558 transition(atos, vtos); 2559 // assume branch is more often taken than not (loops use backward branches) 2560 Label not_taken; 2561 __ pop_ptr(R1_tmp); 2562 __ cmp(R1_tmp, R0_tos); 2563 __ b(not_taken, convNegCond(cc)); 2564 branch(false, false); 2565 __ bind(not_taken); 2566 __ profile_not_taken_branch(R0_tmp); 2567 } 2568 2569 2570 void TemplateTable::ret() { 2571 transition(vtos, vtos); 2572 const Register Rlocal_index = R1_tmp; 2573 const Register Rret_bci = Rtmp_save0; // R4/R19 2574 2575 locals_index(Rlocal_index); 2576 Address local = load_iaddress(Rlocal_index, Rtemp); 2577 __ ldr_s32(Rret_bci, local); // get return bci, compute return bcp 2578 __ profile_ret(Rtmp_save1, Rret_bci); 2579 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); 2580 __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset())); 2581 __ add(Rbcp, Rtemp, Rret_bci); 2582 __ dispatch_next(vtos); 2583 } 2584 2585 2586 void TemplateTable::wide_ret() { 2587 transition(vtos, vtos); 2588 const Register Rlocal_index = R1_tmp; 2589 const Register Rret_bci = Rtmp_save0; // R4/R19 2590 2591 locals_index_wide(Rlocal_index); 2592 Address local = load_iaddress(Rlocal_index, Rtemp); 2593 __ ldr_s32(Rret_bci, local); // get return bci, compute return bcp 2594 __ profile_ret(Rtmp_save1, Rret_bci); 2595 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); 
2596 __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset())); 2597 __ add(Rbcp, Rtemp, Rret_bci); 2598 __ dispatch_next(vtos); 2599 } 2600 2601 2602 void TemplateTable::tableswitch() { 2603 transition(itos, vtos); 2604 2605 const Register Rindex = R0_tos; 2606 #ifndef AARCH64 2607 const Register Rtemp2 = R1_tmp; 2608 #endif // !AARCH64 2609 const Register Rabcp = R2_tmp; // aligned bcp 2610 const Register Rlow = R3_tmp; 2611 const Register Rhigh = R4_tmp; 2612 const Register Roffset = R5_tmp; 2613 2614 // align bcp 2615 __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1)); 2616 __ align_reg(Rabcp, Rtemp, BytesPerInt); 2617 2618 // load lo & hi 2619 #ifdef AARCH64 2620 __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed)); 2621 #else 2622 __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback); 2623 #endif // AARCH64 2624 __ byteswap_u32(Rlow, Rtemp, Rtemp2); 2625 __ byteswap_u32(Rhigh, Rtemp, Rtemp2); 2626 2627 // compare index with high bound 2628 __ cmp_32(Rhigh, Rindex); 2629 2630 #ifdef AARCH64 2631 Label default_case, do_dispatch; 2632 __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge); 2633 __ b(default_case, lt); 2634 2635 __ sub_w(Rindex, Rindex, Rlow); 2636 __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt)); 2637 if(ProfileInterpreter) { 2638 __ sxtw(Rindex, Rindex); 2639 __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp); 2640 } 2641 __ b(do_dispatch); 2642 2643 __ bind(default_case); 2644 __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt)); 2645 if(ProfileInterpreter) { 2646 __ profile_switch_default(R0_tmp); 2647 } 2648 2649 __ bind(do_dispatch); 2650 #else 2651 2652 // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow) 2653 __ subs(Rindex, Rindex, Rlow, ge); 2654 2655 // if Rindex <= Rhigh and (Rindex - Rlow) >= 0 2656 // ("ge" status accumulated from cmp and subs instructions) then load 2657 // offset from table, otherwise load offset for default case 2658 2659 if(ProfileInterpreter) { 2660 Label default_case, continue_execution; 2661 2662 __ b(default_case, lt); 2663 __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt)); 2664 __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp); 2665 __ b(continue_execution); 2666 2667 __ bind(default_case); 2668 __ profile_switch_default(R0_tmp); 2669 __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt)); 2670 2671 __ bind(continue_execution); 2672 } else { 2673 __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt); 2674 __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge); 2675 } 2676 #endif // AARCH64 2677 2678 __ byteswap_u32(Roffset, Rtemp, Rtemp2); 2679 2680 // load the next bytecode to R3_bytecode and advance Rbcp 2681 #ifdef AARCH64 2682 __ add(Rbcp, Rbcp, Roffset, ex_sxtw); 2683 __ ldrb(R3_bytecode, Address(Rbcp)); 2684 #else 2685 __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed)); 2686 #endif // AARCH64 2687 __ dispatch_only(vtos); 2688 2689 } 2690 2691 2692 void TemplateTable::lookupswitch() { 2693 transition(itos, itos); 2694 __ stop("lookupswitch bytecode should have been rewritten"); 2695 } 2696 2697 2698 void TemplateTable::fast_linearswitch() { 2699 transition(itos, vtos); 2700 Label loop, found, default_case, continue_execution; 2701 2702 const Register Rkey = R0_tos; 2703 const Register Rabcp = R2_tmp; // aligned bcp 2704 const Register Rdefault = R3_tmp; 2705 const Register Rcount = R4_tmp; 2706 const Register Roffset = R5_tmp; 2707 2708 // bswap Rkey, so we can avoid bswapping the table entries 2709 __ byteswap_u32(Rkey, 
R1_tmp, Rtemp); 2710 2711 // align bcp 2712 __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1)); 2713 __ align_reg(Rabcp, Rtemp, BytesPerInt); 2714 2715 // load default & counter 2716 #ifdef AARCH64 2717 __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed)); 2718 #else 2719 __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback); 2720 #endif // AARCH64 2721 __ byteswap_u32(Rcount, R1_tmp, Rtemp); 2722 2723 #ifdef AARCH64 2724 __ cbz_w(Rcount, default_case); 2725 #else 2726 __ cmp_32(Rcount, 0); 2727 __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne); 2728 __ b(default_case, eq); 2729 #endif // AARCH64 2730 2731 // table search 2732 __ bind(loop); 2733 #ifdef AARCH64 2734 __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed)); 2735 #endif // AARCH64 2736 __ cmp_32(Rtemp, Rkey); 2737 __ b(found, eq); 2738 __ subs(Rcount, Rcount, 1); 2739 #ifndef AARCH64 2740 __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne); 2741 #endif // !AARCH64 2742 __ b(loop, ne); 2743 2744 // default case 2745 __ bind(default_case); 2746 __ profile_switch_default(R0_tmp); 2747 __ mov(Roffset, Rdefault); 2748 __ b(continue_execution); 2749 2750 // entry found -> get offset 2751 __ bind(found); 2752 // Rabcp is already incremented and points to the next entry 2753 __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt)); 2754 if (ProfileInterpreter) { 2755 // Calculate index of the selected case. 2756 assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp); 2757 2758 // align bcp 2759 __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1)); 2760 __ align_reg(R2_tmp, Rtemp, BytesPerInt); 2761 2762 // load number of cases 2763 __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt)); 2764 __ byteswap_u32(R2_tmp, R1_tmp, Rtemp); 2765 2766 // Selected index = <number of cases> - <current loop count> 2767 __ sub(R1_tmp, R2_tmp, Rcount); 2768 __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp); 2769 } 2770 2771 // continue execution 2772 __ bind(continue_execution); 2773 __ byteswap_u32(Roffset, R1_tmp, Rtemp); 2774 2775 // load the next bytecode to R3_bytecode and advance Rbcp 2776 #ifdef AARCH64 2777 __ add(Rbcp, Rbcp, Roffset, ex_sxtw); 2778 __ ldrb(R3_bytecode, Address(Rbcp)); 2779 #else 2780 __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed)); 2781 #endif // AARCH64 2782 __ dispatch_only(vtos); 2783 } 2784 2785 2786 void TemplateTable::fast_binaryswitch() { 2787 transition(itos, vtos); 2788 // Implementation using the following core algorithm: 2789 // 2790 // int binary_search(int key, LookupswitchPair* array, int n) { 2791 // // Binary search according to "Methodik des Programmierens" by 2792 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. 2793 // int i = 0; 2794 // int j = n; 2795 // while (i+1 < j) { 2796 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) 2797 // // with Q: for all i: 0 <= i < n: key < a[i] 2798 // // where a stands for the array and assuming that the (nonexistent) 2799 // // element a[n] is infinitely big.
2800 // int h = (i + j) >> 1; 2801 // // i < h < j 2802 // if (key < array[h].fast_match()) { 2803 // j = h; 2804 // } else { 2805 // i = h; 2806 // } 2807 // } 2808 // // R: a[i] <= key < a[i+1] or Q 2809 // // (i.e., if key is within array, i is the correct index) 2810 // return i; 2811 // } 2812 2813 // register allocation 2814 const Register key = R0_tos; // already set (tosca) 2815 const Register array = R1_tmp; 2816 const Register i = R2_tmp; 2817 const Register j = R3_tmp; 2818 const Register h = R4_tmp; 2819 const Register val = R5_tmp; 2820 const Register temp1 = Rtemp; 2821 const Register temp2 = LR_tmp; 2822 const Register offset = R3_tmp; 2823 2824 // set 'array' = aligned bcp + 2 ints 2825 __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt); 2826 __ align_reg(array, temp1, BytesPerInt); 2827 2828 // initialize i & j 2829 __ mov(i, 0); // i = 0; 2830 __ ldr_s32(j, Address(array, -BytesPerInt)); // j = length(array); 2831 // Convert j into native byte ordering 2832 __ byteswap_u32(j, temp1, temp2); 2833 2834 // and start 2835 Label entry; 2836 __ b(entry); 2837 2838 // binary search loop 2839 { Label loop; 2840 __ bind(loop); 2841 // int h = (i + j) >> 1; 2842 __ add(h, i, j); // h = i + j; 2843 __ logical_shift_right(h, h, 1); // h = (i + j) >> 1; 2844 // if (key < array[h].fast_match()) { 2845 // j = h; 2846 // } else { 2847 // i = h; 2848 // } 2849 #ifdef AARCH64 2850 __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt)); 2851 __ ldr_s32(val, Address(temp1)); 2852 #else 2853 __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt)); 2854 #endif // AARCH64 2855 // Convert array[h].match to native byte ordering before compare 2856 __ byteswap_u32(val, temp1, temp2); 2857 __ cmp_32(key, val); 2858 __ mov(j, h, lt); // j = h if (key < array[h].fast_match()) 2859 __ mov(i, h, ge); // i = h if (key >= array[h].fast_match()) 2860 // while (i+1 < j) 2861 __ bind(entry); 2862 __ add(temp1, i, 1); // i+1 2863 __ cmp(temp1, j); // i+1 < j 2864 __ b(loop, lt); 2865 } 2866 2867 // end of binary search, result index is i (must check again!)
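// The loop only guarantees a[i] <= key < a[i+1] (or Q), so the key may still be absent from the table: re-check array[i].match against the key before using its offset.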
2868 Label default_case; 2869 // Convert array[i].match to native byte ordering before compare 2870 #ifdef AARCH64 2871 __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt)); 2872 __ ldr_s32(val, Address(temp1)); 2873 #else 2874 __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt)); 2875 #endif // AARCH64 2876 __ byteswap_u32(val, temp1, temp2); 2877 __ cmp_32(key, val); 2878 __ b(default_case, ne); 2879 2880 // entry found 2881 __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt)); 2882 __ ldr_s32(offset, Address(temp1, 1*BytesPerInt)); 2883 __ profile_switch_case(R0, i, R1, i); 2884 __ byteswap_u32(offset, temp1, temp2); 2885 #ifdef AARCH64 2886 __ add(Rbcp, Rbcp, offset, ex_sxtw); 2887 __ ldrb(R3_bytecode, Address(Rbcp)); 2888 #else 2889 __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed)); 2890 #endif // AARCH64 2891 __ dispatch_only(vtos); 2892 2893 // default case 2894 __ bind(default_case); 2895 __ profile_switch_default(R0); 2896 __ ldr_s32(offset, Address(array, -2*BytesPerInt)); 2897 __ byteswap_u32(offset, temp1, temp2); 2898 #ifdef AARCH64 2899 __ add(Rbcp, Rbcp, offset, ex_sxtw); 2900 __ ldrb(R3_bytecode, Address(Rbcp)); 2901 #else 2902 __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed)); 2903 #endif // AARCH64 2904 __ dispatch_only(vtos); 2905 } 2906 2907 2908 void TemplateTable::_return(TosState state) { 2909 transition(state, state); 2910 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation 2911 2912 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { 2913 Label skip_register_finalizer; 2914 assert(state == vtos, "only valid state"); 2915 __ ldr(R1, aaddress(0)); 2916 __ load_klass(Rtemp, R1); 2917 __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset())); 2918 __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer); 2919 2920 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1); 2921 2922 __ bind(skip_register_finalizer); 2923 } 2924 2925 // Narrow result if state is itos but result type is smaller. 2926 // Need to narrow in the return bytecode rather than in generate_return_entry 2927 // since compiled code callers expect the result to already be narrowed. 2928 if (state == itos) { 2929 __ narrow(R0_tos); 2930 } 2931 __ remove_activation(state, LR); 2932 2933 __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__); 2934 2935 #ifndef AARCH64 2936 // According to interpreter calling conventions, result is returned in R0/R1, 2937 // so ftos (S0) and dtos (D0) are moved to R0/R1. 2938 // This conversion should be done after remove_activation, as it uses 2939 // push(state) & pop(state) to preserve return value. 2940 __ convert_tos_to_retval(state); 2941 #endif // !AARCH64 2942 2943 __ ret(); 2944 2945 __ nop(); // to avoid filling CPU pipeline with invalid instructions 2946 __ nop(); 2947 } 2948 2949 2950 // ---------------------------------------------------------------------------- 2951 // Volatile variables demand their effects be made known to all CPUs in 2952 // order. Store buffers on most chips allow reads & writes to reorder; the 2953 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of 2954 // memory barrier (i.e., it's not sufficient that the interpreter does not 2955 // reorder volatile references, the hardware also must not reorder them). 2956 // 2957 // According to the new Java Memory Model (JMM): 2958 // (1) All volatiles are serialized with respect to each other.
// ALSO reads & writes act as acquire & release, so: 2960 // (2) A read cannot let unrelated NON-volatile memory refs that happen after 2961 // the read float up to before the read. It's OK for non-volatile memory refs 2962 // that happen before the volatile read to float down below it. 2963 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs 2964 // that happen BEFORE the write float down to after the write. It's OK for 2965 // non-volatile memory refs that happen after the volatile write to float up 2966 // before it. 2967 // 2968 // We only put in barriers around volatile refs (they are expensive), not 2969 // _between_ memory refs (that would require us to track the flavor of the 2970 // previous memory refs). Requirements (2) and (3) require some barriers 2971 // before volatile stores and after volatile loads. These nearly cover 2972 // requirement (1) but miss the volatile-store-volatile-load case. This final 2973 // case is placed after volatile-stores although it could just as well go 2974 // before volatile-loads. 2975 // TODO-AARCH64: consider removing extra unused parameters 2976 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint, 2977 Register tmp, 2978 bool preserve_flags, 2979 Register load_tgt) { 2980 #ifdef AARCH64 2981 __ membar(order_constraint); 2982 #else 2983 __ membar(order_constraint, tmp, preserve_flags, load_tgt); 2984 #endif 2985 } 2986 2987 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR. 2988 void TemplateTable::resolve_cache_and_index(int byte_no, 2989 Register Rcache, 2990 Register Rindex, 2991 size_t index_size) { 2992 assert_different_registers(Rcache, Rindex, Rtemp); 2993 2994 Label resolved; 2995 Bytecodes::Code code = bytecode(); 2996 switch (code) { 2997 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; 2998 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; 2999 } 3000 3001 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); 3002 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size); 3003 __ cmp(Rtemp, code); // have we resolved this bytecode?
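// (The cache entry records the bytecode only once resolution has completed, so matching the expected bytecode above means the entry is fully resolved and usable.)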
3004 __ b(resolved, eq); 3005 3006 // resolve first time through 3007 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache); 3008 __ mov(R1, code); 3009 __ call_VM(noreg, entry, R1); 3010 // Update registers with resolved info 3011 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size); 3012 __ bind(resolved); 3013 } 3014 3015 3016 // The Rcache and Rindex registers must be set before the call 3017 void TemplateTable::load_field_cp_cache_entry(Register Rcache, 3018 Register Rindex, 3019 Register Roffset, 3020 Register Rflags, 3021 Register Robj, 3022 bool is_static = false) { 3023 3024 assert_different_registers(Rcache, Rindex, Rtemp); 3025 assert_different_registers(Roffset, Rflags, Robj, Rtemp); 3026 3027 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3028 3029 __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3030 3031 // Field offset 3032 __ ldr(Roffset, Address(Rtemp, 3033 cp_base_offset + ConstantPoolCacheEntry::f2_offset())); 3034 3035 // Flags 3036 __ ldr_u32(Rflags, Address(Rtemp, 3037 cp_base_offset + ConstantPoolCacheEntry::flags_offset())); 3038 3039 if (is_static) { 3040 __ ldr(Robj, Address(Rtemp, 3041 cp_base_offset + ConstantPoolCacheEntry::f1_offset())); 3042 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 3043 __ ldr(Robj, Address(Robj, mirror_offset)); 3044 __ resolve_oop_handle(Robj); 3045 } 3046 } 3047 3048 3049 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR. 3050 void TemplateTable::load_invoke_cp_cache_entry(int byte_no, 3051 Register method, 3052 Register itable_index, 3053 Register flags, 3054 bool is_invokevirtual, 3055 bool is_invokevfinal/*unused*/, 3056 bool is_invokedynamic) { 3057 // setup registers 3058 const Register cache = R2_tmp; 3059 const Register index = R3_tmp; 3060 const Register temp_reg = Rtemp; 3061 assert_different_registers(cache, index, temp_reg); 3062 assert_different_registers(method, itable_index, temp_reg); 3063 3064 // determine constant pool cache field offsets 3065 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); 3066 const int method_offset = in_bytes( 3067 ConstantPoolCache::base_offset() + 3068 ((byte_no == f2_byte) 3069 ? ConstantPoolCacheEntry::f2_offset() 3070 : ConstantPoolCacheEntry::f1_offset() 3071 ) 3072 ); 3073 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() + 3074 ConstantPoolCacheEntry::flags_offset()); 3075 // access constant pool cache fields 3076 const int index_offset = in_bytes(ConstantPoolCache::base_offset() + 3077 ConstantPoolCacheEntry::f2_offset()); 3078 3079 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2)); 3080 resolve_cache_and_index(byte_no, cache, index, index_size); 3081 __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord)); 3082 __ ldr(method, Address(temp_reg, method_offset)); 3083 3084 if (itable_index != noreg) { 3085 __ ldr(itable_index, Address(temp_reg, index_offset)); 3086 } 3087 __ ldr_u32(flags, Address(temp_reg, flags_offset)); 3088 } 3089 3090 3091 // The cache and index registers are expected to be set before the call, and should not be Rtemp. 3092 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR, 3093 // except cache and index registers which are preserved.
3094 void TemplateTable::jvmti_post_field_access(Register Rcache, 3095 Register Rindex, 3096 bool is_static, 3097 bool has_tos) { 3098 assert_different_registers(Rcache, Rindex, Rtemp); 3099 3100 if (__ can_post_field_access()) { 3101 // Check to see if a field access watch has been set before we take 3102 // the time to call into the VM. 3103 3104 Label Lcontinue; 3105 3106 __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr()); 3107 __ cbz(Rtemp, Lcontinue); 3108 3109 // cache entry pointer 3110 __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3111 __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset())); 3112 if (is_static) { 3113 __ mov(R1, 0); // NULL object reference 3114 } else { 3115 __ pop(atos); // Get the object 3116 __ mov(R1, R0_tos); 3117 __ verify_oop(R1); 3118 __ push(atos); // Restore stack state 3119 } 3120 // R1: object pointer or NULL 3121 // R2: cache entry pointer 3122 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), 3123 R1, R2); 3124 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1); 3125 3126 __ bind(Lcontinue); 3127 } 3128 } 3129 3130 3131 void TemplateTable::pop_and_check_object(Register r) { 3132 __ pop_ptr(r); 3133 __ null_check(r, Rtemp); // for field access must check obj. 3134 __ verify_oop(r); 3135 } 3136 3137 3138 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 3139 transition(vtos, vtos); 3140 3141 const Register Roffset = R2_tmp; 3142 const Register Robj = R3_tmp; 3143 const Register Rcache = R4_tmp; 3144 const Register Rflagsav = Rtmp_save0; // R4/R19 3145 const Register Rindex = R5_tmp; 3146 const Register Rflags = R5_tmp; 3147 3148 const bool gen_volatile_check = os::is_MP(); 3149 3150 resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2)); 3151 jvmti_post_field_access(Rcache, Rindex, is_static, false); 3152 load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static); 3153 3154 if (gen_volatile_check) { 3155 __ mov(Rflagsav, Rflags); 3156 } 3157 3158 if (!is_static) pop_and_check_object(Robj); 3159 3160 Label Done, Lint, Ltable, shouldNotReachHere; 3161 Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos; 3162 3163 // compute type 3164 __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift); 3165 // Make sure we don't need to mask flags after the above shift 3166 ConstantPoolCacheEntry::verify_tos_state_shift(); 3167 3168 // There are actually two implementations of getfield/getstatic: 3169 // 3170 // 32-bit ARM: 3171 // 1) Table switch using add(PC,...) instruction (fast_version) 3172 // 2) Table switch using ldr(PC,...) instruction 3173 // 3174 // AArch64: 3175 // 1) Table switch using adr/add/br instructions (fast_version) 3176 // 2) Table switch using adr/ldr/br instructions 3177 // 3178 // The first version requires a fixed-size code block for each case and 3179 // cannot be used in RewriteBytecodes and VerifyOops 3180 // modes. 3181 3182 // Size of fixed size code block for fast_version 3183 const int log_max_block_size = AARCH64_ONLY(2) NOT_AARCH64(3); 3184 const int max_block_size = 1 << log_max_block_size; 3185 3186 // Decide if fast version is enabled 3187 bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop; 3188 3189 // On 32-bit ARM atos and itos cases can be merged only for the fast version, because 3190 // atos requires additional processing in the slow version. 3191 // On AArch64 atos and itos cannot be merged.
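// Dispatch sketch for the fast version: each tos case below is padded to max_block_size instructions, so the handler for tos state n starts n * (max_block_size * instruction size) bytes past the first (btos) block and is reached with a single add to PC (32-bit ARM) or adr/add/br (AArch64).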
bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version); 3193 3194 assert(number_of_states == 10, "number of tos states should be equal to 10"); 3195 3196 __ cmp(Rflags, itos); 3197 #ifdef AARCH64 3198 __ b(Lint, eq); 3199 3200 if(fast_version) { 3201 __ adr(Rtemp, Lbtos); 3202 __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize)); 3203 __ br(Rtemp); 3204 } else { 3205 __ adr(Rtemp, Ltable); 3206 __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags)); 3207 __ br(Rtemp); 3208 } 3209 #else 3210 if(atos_merged_with_itos) { 3211 __ cmp(Rflags, atos, ne); 3212 } 3213 3214 // table switch by type 3215 if(fast_version) { 3216 __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne); 3217 } else { 3218 __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne); 3219 } 3220 3221 // jump to itos/atos case 3222 __ b(Lint); 3223 #endif // AARCH64 3224 3225 // table with addresses for slow version 3226 if (fast_version) { 3227 // nothing to do 3228 } else { 3229 AARCH64_ONLY(__ align(wordSize)); 3230 __ bind(Ltable); 3231 __ emit_address(Lbtos); 3232 __ emit_address(Lztos); 3233 __ emit_address(Lctos); 3234 __ emit_address(Lstos); 3235 __ emit_address(Litos); 3236 __ emit_address(Lltos); 3237 __ emit_address(Lftos); 3238 __ emit_address(Ldtos); 3239 __ emit_address(Latos); 3240 } 3241 3242 #ifdef ASSERT 3243 int seq = 0; 3244 #endif 3245 // btos 3246 { 3247 assert(btos == seq++, "btos has unexpected value"); 3248 FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version); 3249 __ bind(Lbtos); 3250 __ access_load_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg); 3251 __ push(btos); 3252 // Rewrite bytecode to be faster 3253 if (!is_static && rc == may_rewrite) { 3254 patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp); 3255 } 3256 __ b(Done); 3257 } 3258 3259 // ztos (same as btos for getfield) 3260 { 3261 assert(ztos == seq++, "ztos has unexpected value"); 3262 FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version); 3263 __ bind(Lztos); 3264 __ access_load_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg); 3265 __ push(ztos); 3266 // Rewrite bytecode to be faster (use btos fast getfield) 3267 if (!is_static && rc == may_rewrite) { 3268 patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp); 3269 } 3270 __ b(Done); 3271 } 3272 3273 // ctos 3274 { 3275 assert(ctos == seq++, "ctos has unexpected value"); 3276 FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version); 3277 __ bind(Lctos); 3278 __ access_load_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg); 3279 __ push(ctos); 3280 if (!is_static && rc == may_rewrite) { 3281 patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp); 3282 } 3283 __ b(Done); 3284 } 3285 3286 // stos 3287 { 3288 assert(stos == seq++, "stos has unexpected value"); 3289 FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version); 3290 __ bind(Lstos); 3291 __ access_load_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg); 3292 __ push(stos); 3293 if (!is_static && rc == may_rewrite) { 3294 patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp); 3295 } 3296 __ b(Done); 3297 } 3298 3299 // itos 3300 { 3301 assert(itos == seq++, "itos has unexpected value"); 3302 FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version); 3303 __ bind(Litos); 3304 __ b(shouldNotReachHere); 3305 } 3306 3307 // ltos 3308 { 3309 assert(ltos == seq++,
"ltos has unexpected value"); 3310 FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version); 3311 __ bind(Lltos); 3312 #ifdef AARCH64 3313 __ ldr(R0_tos, Address(Robj, Roffset)); 3314 #else 3315 __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg); 3316 #endif // AARCH64 3317 __ push(ltos); 3318 if (!is_static && rc == may_rewrite) { 3319 patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp); 3320 } 3321 __ b(Done); 3322 } 3323 3324 // ftos 3325 { 3326 assert(ftos == seq++, "ftos has unexpected value"); 3327 FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version); 3328 __ bind(Lftos); 3329 // floats and ints are placed on stack in same way, so 3330 // we can use push(itos) to transfer value without using VFP 3331 __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg); 3332 __ push(itos); 3333 if (!is_static && rc == may_rewrite) { 3334 patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp); 3335 } 3336 __ b(Done); 3337 } 3338 3339 // dtos 3340 { 3341 assert(dtos == seq++, "dtos has unexpected value"); 3342 FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version); 3343 __ bind(Ldtos); 3344 // doubles and longs are placed on stack in the same way, so 3345 // we can use push(ltos) to transfer value without using VFP 3346 #ifdef AARCH64 3347 __ ldr(R0_tos, Address(Robj, Roffset)); 3348 #else 3349 __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg); 3350 #endif // AARCH64 3351 __ push(ltos); 3352 if (!is_static && rc == may_rewrite) { 3353 patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp); 3354 } 3355 __ b(Done); 3356 } 3357 3358 // atos 3359 { 3360 assert(atos == seq++, "atos has unexpected value"); 3361 3362 // atos case for AArch64 and slow version on 32-bit ARM 3363 if(!atos_merged_with_itos) { 3364 __ bind(Latos); 3365 do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); 3366 __ push(atos); 3367 // Rewrite bytecode to be faster 3368 if (!is_static && rc == may_rewrite) { 3369 patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp); 3370 } 3371 __ b(Done); 3372 } 3373 } 3374 3375 assert(vtos == seq++, "vtos has unexpected value"); 3376 3377 __ bind(shouldNotReachHere); 3378 __ should_not_reach_here(); 3379 3380 // itos and atos cases are frequent so it makes sense to move them out of table switch 3381 // atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only 3382 3383 __ bind(Lint); 3384 __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg); 3385 __ push(itos); 3386 // Rewrite bytecode to be faster 3387 if (!is_static && rc == may_rewrite) { 3388 patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp); 3389 } 3390 3391 __ bind(Done); 3392 3393 if (gen_volatile_check) { 3394 // Check for volatile field 3395 Label notVolatile; 3396 __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3397 3398 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); 3399 3400 __ bind(notVolatile); 3401 } 3402 3403 } 3404 3405 void TemplateTable::getfield(int byte_no) { 3406 getfield_or_static(byte_no, false); 3407 } 3408 3409 void TemplateTable::nofast_getfield(int byte_no) { 3410 getfield_or_static(byte_no, false, may_not_rewrite); 3411 } 3412 3413 void TemplateTable::getstatic(int byte_no) { 3414 getfield_or_static(byte_no, true); 3415 } 3416 3417 3418 // The registers cache 
and index are expected to be set before the call, and should not be R1 or Rtemp. 3419 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR, 3420 // except cache and index registers which are preserved. 3421 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) { 3422 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3423 assert_different_registers(Rcache, Rindex, R1, Rtemp); 3424 3425 if (__ can_post_field_modification()) { 3426 // Check to see if a field modification watch has been set before we take 3427 // the time to call into the VM. 3428 Label Lcontinue; 3429 3430 __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr()); 3431 __ cbz(Rtemp, Lcontinue); 3432 3433 if (is_static) { 3434 // Life is simple. Null out the object pointer. 3435 __ mov(R1, 0); 3436 } else { 3437 // Life is harder. The stack holds the value on top, followed by the object. 3438 // We don't know the size of the value, though; it could be one or two words 3439 // depending on its type. As a result, we must find the type to determine where 3440 // the object is. 3441 3442 __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3443 __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset())); 3444 3445 __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift); 3446 // Make sure we don't need to mask Rtemp after the above shift 3447 ConstantPoolCacheEntry::verify_tos_state_shift(); 3448 3449 __ cmp(Rtemp, ltos); 3450 __ cond_cmp(Rtemp, dtos, ne); 3451 #ifdef AARCH64 3452 __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2)); 3453 __ mov(R1, Interpreter::expr_offset_in_bytes(1)); 3454 __ mov(R1, Rtemp, eq); 3455 __ ldr(R1, Address(Rstack_top, R1)); 3456 #else 3457 // two word value (ltos/dtos) 3458 __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq); 3459 3460 // one word value (not ltos, dtos) 3461 __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne); 3462 #endif // AARCH64 3463 } 3464 3465 // cache entry pointer 3466 __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3467 __ add(R2, R2, in_bytes(cp_base_offset)); 3468 3469 // object (tos) 3470 __ mov(R3, Rstack_top); 3471 3472 // R1: object pointer set up above (NULL if static) 3473 // R2: cache entry pointer 3474 // R3: value object on the stack 3475 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), 3476 R1, R2, R3); 3477 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1); 3478 3479 __ bind(Lcontinue); 3480 } 3481 } 3482 3483 3484 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 3485 transition(vtos, vtos); 3486 3487 const Register Roffset = R2_tmp; 3488 const Register Robj = R3_tmp; 3489 const Register Rcache = R4_tmp; 3490 const Register Rflagsav = Rtmp_save0; // R4/R19 3491 const Register Rindex = R5_tmp; 3492 const Register Rflags = R5_tmp; 3493 3494 const bool gen_volatile_check = os::is_MP(); 3495 3496 resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2)); 3497 jvmti_post_field_mod(Rcache, Rindex, is_static); 3498 load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static); 3499 3500 if (gen_volatile_check) { 3501 // Check for volatile field 3502 Label notVolatile; 3503 __ mov(Rflagsav, Rflags); 3504 __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3505 3506 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore |
MacroAssembler::LoadStore), Rtemp); 3507 3508 __ bind(notVolatile); 3509 } 3510 3511 Label Done, Lint, shouldNotReachHere; 3512 Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos; 3513 3514 // compute type 3515 __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift); 3516 // Make sure we don't need to mask flags after the above shift 3517 ConstantPoolCacheEntry::verify_tos_state_shift(); 3518 3519 // There are actually two implementations of putfield/putstatic: 3520 // 3521 // 32-bit ARM: 3522 // 1) Table switch using add(PC,...) instruction (fast_version) 3523 // 2) Table switch using ldr(PC,...) instruction 3524 // 3525 // AArch64: 3526 // 1) Table switch using adr/add/br instructions (fast_version) 3527 // 2) Table switch using adr/ldr/br instructions 3528 // 3529 // The first version requires a fixed-size code block for each case and 3530 // cannot be used in RewriteBytecodes and VerifyOops 3531 // modes. 3532 3533 // Size of fixed size code block for fast_version (in instructions) 3534 const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3); 3535 const int max_block_size = 1 << log_max_block_size; 3536 3537 // Decide if fast version is enabled 3538 bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits; 3539 3540 assert(number_of_states == 10, "number of tos states should be equal to 10"); 3541 3542 // itos case is frequent and is moved outside table switch 3543 __ cmp(Rflags, itos); 3544 3545 #ifdef AARCH64 3546 __ b(Lint, eq); 3547 3548 if (fast_version) { 3549 __ adr(Rtemp, Lbtos); 3550 __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize)); 3551 __ br(Rtemp); 3552 } else { 3553 __ adr(Rtemp, Ltable); 3554 __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags)); 3555 __ br(Rtemp); 3556 } 3557 #else 3558 // table switch by type 3559 if (fast_version) { 3560 __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne); 3561 } else { 3562 __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne); 3563 } 3564 3565 // jump to itos case 3566 __ b(Lint); 3567 #endif // AARCH64 3568 3569 // table with addresses for slow version 3570 if (fast_version) { 3571 // nothing to do 3572 } else { 3573 AARCH64_ONLY(__ align(wordSize)); 3574 __ bind(Ltable); 3575 __ emit_address(Lbtos); 3576 __ emit_address(Lztos); 3577 __ emit_address(Lctos); 3578 __ emit_address(Lstos); 3579 __ emit_address(Litos); 3580 __ emit_address(Lltos); 3581 __ emit_address(Lftos); 3582 __ emit_address(Ldtos); 3583 __ emit_address(Latos); 3584 } 3585 3586 #ifdef ASSERT 3587 int seq = 0; 3588 #endif 3589 // btos 3590 { 3591 assert(btos == seq++, "btos has unexpected value"); 3592 FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version); 3593 __ bind(Lbtos); 3594 __ pop(btos); 3595 if (!is_static) pop_and_check_object(Robj); 3596 __ access_store_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false); 3597 if (!is_static && rc == may_rewrite) { 3598 patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no); 3599 } 3600 __ b(Done); 3601 } 3602 3603 // ztos 3604 { 3605 assert(ztos == seq++, "ztos has unexpected value"); 3606 FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version); 3607 __ bind(Lztos); 3608 __ pop(ztos); 3609 if (!is_static) pop_and_check_object(Robj); 3610 __ access_store_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false); 3611 if
(!is_static && rc == may_rewrite) { 3612 patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no); 3613 } 3614 __ b(Done); 3615 } 3616 3617 // ctos 3618 { 3619 assert(ctos == seq++, "ctos has unexpected value"); 3620 FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version); 3621 __ bind(Lctos); 3622 __ pop(ctos); 3623 if (!is_static) pop_and_check_object(Robj); 3624 __ access_store_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false); 3625 if (!is_static && rc == may_rewrite) { 3626 patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no); 3627 } 3628 __ b(Done); 3629 } 3630 3631 // stos 3632 { 3633 assert(stos == seq++, "stos has unexpected value"); 3634 FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version); 3635 __ bind(Lstos); 3636 __ pop(stos); 3637 if (!is_static) pop_and_check_object(Robj); 3638 __ access_store_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false); 3639 if (!is_static && rc == may_rewrite) { 3640 patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no); 3641 } 3642 __ b(Done); 3643 } 3644 3645 // itos 3646 { 3647 assert(itos == seq++, "itos has unexpected value"); 3648 FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version); 3649 __ bind(Litos); 3650 __ b(shouldNotReachHere); 3651 } 3652 3653 // ltos 3654 { 3655 assert(ltos == seq++, "ltos has unexpected value"); 3656 FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version); 3657 __ bind(Lltos); 3658 __ pop(ltos); 3659 if (!is_static) pop_and_check_object(Robj); 3660 #ifdef AARCH64 3661 __ str(R0_tos, Address(Robj, Roffset)); 3662 #else 3663 __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false); 3664 #endif // AARCH64 3665 if (!is_static && rc == may_rewrite) { 3666 patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no); 3667 } 3668 __ b(Done); 3669 } 3670 3671 // ftos 3672 { 3673 assert(ftos == seq++, "ftos has unexpected value"); 3674 FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version); 3675 __ bind(Lftos); 3676 // floats and ints are placed on stack in the same way, so 3677 // we can use pop(itos) to transfer value without using VFP 3678 __ pop(itos); 3679 if (!is_static) pop_and_check_object(Robj); 3680 __ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false); 3681 if (!is_static && rc == may_rewrite) { 3682 patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no); 3683 } 3684 __ b(Done); 3685 } 3686 3687 // dtos 3688 { 3689 assert(dtos == seq++, "dtos has unexpected value"); 3690 FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version); 3691 __ bind(Ldtos); 3692 // doubles and longs are placed on stack in the same way, so 3693 // we can use pop(ltos) to transfer value without using VFP 3694 __ pop(ltos); 3695 if (!is_static) pop_and_check_object(Robj); 3696 #ifdef AARCH64 3697 __ str(R0_tos, Address(Robj, Roffset)); 3698 #else 3699 __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false); 3700 #endif // AARCH64 3701 if (!is_static && rc == may_rewrite) { 3702 patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no); 3703 } 3704 __ b(Done); 3705 } 3706 3707 // atos 3708 { 3709 assert(atos == seq++, "atos has unexpected value"); 3710 __ bind(Latos); 3711 __ pop(atos); 3712 if (!is_static) pop_and_check_object(Robj); 3713 // Store into the field 3714
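// Unlike the primitive cases above, the oop store goes through do_oop_store so the GC barrier set can emit any required pre/post write barriers.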
do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false); 3715 if (!is_static && rc == may_rewrite) { 3716 patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no); 3717 } 3718 __ b(Done); 3719 } 3720 3721 __ bind(shouldNotReachHere); 3722 __ should_not_reach_here(); 3723 3724 // itos case is frequent and is moved outside table switch 3725 __ bind(Lint); 3726 __ pop(itos); 3727 if (!is_static) pop_and_check_object(Robj); 3728 __ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false); 3729 if (!is_static && rc == may_rewrite) { 3730 patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no); 3731 } 3732 3733 __ bind(Done); 3734 3735 if (gen_volatile_check) { 3736 Label notVolatile; 3737 if (is_static) { 3738 // Just check for volatile. Memory barrier for static final field 3739 // is handled by class initialization. 3740 __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3741 volatile_barrier(MacroAssembler::StoreLoad, Rtemp); 3742 __ bind(notVolatile); 3743 } else { 3744 // Check for volatile field and final field 3745 Label skipMembar; 3746 3747 __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift | 3748 1 << ConstantPoolCacheEntry::is_final_shift); 3749 __ b(skipMembar, eq); 3750 3751 __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3752 3753 // StoreLoad barrier after volatile field write 3754 volatile_barrier(MacroAssembler::StoreLoad, Rtemp); 3755 __ b(skipMembar); 3756 3757 // StoreStore barrier after final field write 3758 __ bind(notVolatile); 3759 volatile_barrier(MacroAssembler::StoreStore, Rtemp); 3760 3761 __ bind(skipMembar); 3762 } 3763 } 3764 3765 } 3766 3767 void TemplateTable::putfield(int byte_no) { 3768 putfield_or_static(byte_no, false); 3769 } 3770 3771 void TemplateTable::nofast_putfield(int byte_no) { 3772 putfield_or_static(byte_no, false, may_not_rewrite); 3773 } 3774 3775 void TemplateTable::putstatic(int byte_no) { 3776 putfield_or_static(byte_no, true); 3777 } 3778 3779 3780 void TemplateTable::jvmti_post_fast_field_mod() { 3781 // This version of jvmti_post_fast_field_mod() is not used on ARM 3782 Unimplemented(); 3783 } 3784 3785 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR, 3786 // but preserves tosca with the given state. 3787 void TemplateTable::jvmti_post_fast_field_mod(TosState state) { 3788 if (__ can_post_field_modification()) { 3789 // Check to see if a field modification watch has been set before we take 3790 // the time to call into the VM. 
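// In effect (a C-like sketch, not the emitted code):
//   if (*JvmtiExport::get_field_modification_count_addr() == 0) goto done;
// i.e. the expensive call into the VM below is taken only when at least
// one field modification watch is actually set.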
3791 Label done; 3792 3793 __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr()); 3794 __ cbz(R2, done); 3795 3796 __ pop_ptr(R3); // copy the object pointer from tos 3797 __ verify_oop(R3); 3798 __ push_ptr(R3); // put the object pointer back on tos 3799 3800 __ push(state); // save value on the stack 3801 3802 // access constant pool cache entry 3803 __ get_cache_entry_pointer_at_bcp(R2, R1, 1); 3804 3805 __ mov(R1, R3); 3806 assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code"); 3807 __ mov(R3, Rstack_top); // put tos addr into R3 3808 3809 // R1: object pointer copied above 3810 // R2: cache entry pointer 3811 // R3: jvalue object on the stack 3812 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3); 3813 3814 __ pop(state); // restore value 3815 3816 __ bind(done); 3817 } 3818 } 3819 3820 3821 void TemplateTable::fast_storefield(TosState state) { 3822 transition(state, vtos); 3823 3824 ByteSize base = ConstantPoolCache::base_offset(); 3825 3826 jvmti_post_fast_field_mod(state); 3827 3828 const Register Rcache = R2_tmp; 3829 const Register Rindex = R3_tmp; 3830 const Register Roffset = R3_tmp; 3831 const Register Rflags = Rtmp_save0; // R4/R19 3832 const Register Robj = R5_tmp; 3833 3834 const bool gen_volatile_check = os::is_MP(); 3835 3836 // access constant pool cache 3837 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1); 3838 3839 __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3840 3841 if (gen_volatile_check) { 3842 // load flags to test volatile 3843 __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset())); 3844 } 3845 3846 // replace index with field offset from cache entry 3847 __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset())); 3848 3849 if (gen_volatile_check) { 3850 // Check for volatile store 3851 Label notVolatile; 3852 __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3853 3854 // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explicit barrier 3855 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp); 3856 3857 __ bind(notVolatile); 3858 } 3859 3860 // Get object from stack 3861 pop_and_check_object(Robj); 3862 3863 Address addr = Address(Robj, Roffset); 3864 // access field 3865 switch (bytecode()) { 3866 case Bytecodes::_fast_zputfield: 3867 __ access_store_at(T_BOOLEAN, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false); 3868 break; 3869 case Bytecodes::_fast_bputfield: 3870 __ access_store_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false); 3871 break; 3872 case Bytecodes::_fast_sputfield: 3873 __ access_store_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false); 3874 break; 3875 case Bytecodes::_fast_cputfield: 3876 __ access_store_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false); 3877 break; 3878 case Bytecodes::_fast_iputfield: 3879 __ access_store_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false); 3880 break; 3881 #ifdef AARCH64 3882 case Bytecodes::_fast_lputfield: __ str (R0_tos, addr); break; 3883 case Bytecodes::_fast_fputfield: __ str_s(S0_tos, addr); break; 3884 case Bytecodes::_fast_dputfield: __ str_d(D0_tos, addr); break; 3885 #else 3886 case Bytecodes::_fast_lputfield: 3887 __ access_store_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg, false); 3888 break; 3889 case Bytecodes::_fast_fputfield: 3890 __ access_store_at(T_FLOAT,
IN_HEAP, addr, noreg, noreg, noreg, noreg, false); 3891 break; 3892 case Bytecodes::_fast_dputfield: 3893 __ access_store_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg, false); 3894 break; 3895 #endif // AARCH64 3896 3897 case Bytecodes::_fast_aputfield: 3898 do_oop_store(_masm, addr, R0_tos, Rtemp, R1_tmp, R2_tmp, false); 3899 break; 3900 3901 default: 3902 ShouldNotReachHere(); 3903 } 3904 3905 if (gen_volatile_check) { 3906 Label notVolatile; 3907 Label skipMembar; 3908 __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift | 3909 1 << ConstantPoolCacheEntry::is_final_shift); 3910 __ b(skipMembar, eq); 3911 3912 __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3913 3914 // StoreLoad barrier after volatile field write 3915 volatile_barrier(MacroAssembler::StoreLoad, Rtemp); 3916 __ b(skipMembar); 3917 3918 // StoreStore barrier after final field write 3919 __ bind(notVolatile); 3920 volatile_barrier(MacroAssembler::StoreStore, Rtemp); 3921 3922 __ bind(skipMembar); 3923 } 3924 } 3925 3926 3927 void TemplateTable::fast_accessfield(TosState state) { 3928 transition(atos, state); 3929 3930 // do the JVMTI work here to avoid disturbing the register state below 3931 if (__ can_post_field_access()) { 3932 // Check to see if a field access watch has been set before we take 3933 // the time to call into the VM. 3934 Label done; 3935 __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr()); 3936 __ cbz(R2, done); 3937 // access constant pool cache entry 3938 __ get_cache_entry_pointer_at_bcp(R2, R1, 1); 3939 __ push_ptr(R0_tos); // save object pointer before call_VM() clobbers it 3940 __ verify_oop(R0_tos); 3941 __ mov(R1, R0_tos); 3942 // R1: object pointer copied above 3943 // R2: cache entry pointer 3944 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2); 3945 __ pop_ptr(R0_tos); // restore object pointer 3946 3947 __ bind(done); 3948 } 3949 3950 const Register Robj = R0_tos; 3951 const Register Rcache = R2_tmp; 3952 const Register Rflags = R2_tmp; 3953 const Register Rindex = R3_tmp; 3954 const Register Roffset = R3_tmp; 3955 3956 const bool gen_volatile_check = os::is_MP(); 3957 3958 // access constant pool cache 3959 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1); 3960 // replace index with field offset from cache entry 3961 __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3962 __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())); 3963 3964 if (gen_volatile_check) { 3965 // load flags to test volatile 3966 __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); 3967 } 3968 3969 __ verify_oop(Robj); 3970 __ null_check(Robj, Rtemp); 3971 3972 Address addr = Address(Robj, Roffset); 3973 // access field 3974 switch (bytecode()) { 3975 case Bytecodes::_fast_bgetfield: 3976 __ access_load_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg); 3977 break; 3978 case Bytecodes::_fast_sgetfield: 3979 __ access_load_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg); 3980 break; 3981 case Bytecodes::_fast_cgetfield: 3982 __ access_load_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg); 3983 break; 3984 case Bytecodes::_fast_igetfield: 3985 __ access_load_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg); 3986 break; 3987 #ifdef AARCH64 3988 case Bytecodes::_fast_lgetfield: __ ldr (R0_tos, addr); break; 3989 case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, addr); 
break; 3990 case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, addr); break; 3991 #else 3992 case Bytecodes::_fast_lgetfield: 3993 __ access_load_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg); 3994 break; 3995 case Bytecodes::_fast_fgetfield: 3996 __ access_load_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg); 3997 break; 3998 case Bytecodes::_fast_dgetfield: 3999 __ access_load_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg); 4000 break; 4001 #endif // AARCH64 4002 case Bytecodes::_fast_agetfield: 4003 do_oop_load(_masm, R0_tos, addr); 4004 __ verify_oop(R0_tos); 4005 break; 4006 default: 4007 ShouldNotReachHere(); 4008 } 4009 4010 if (gen_volatile_check) { 4011 // Check for volatile load 4012 Label notVolatile; 4013 __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 4014 4015 // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explicit barrier 4016 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); 4017 4018 __ bind(notVolatile); 4019 } 4020 } 4021 4022 4023 void TemplateTable::fast_xaccess(TosState state) { 4024 transition(vtos, state); 4025 4026 const Register Robj = R1_tmp; 4027 const Register Rcache = R2_tmp; 4028 const Register Rindex = R3_tmp; 4029 const Register Roffset = R3_tmp; 4030 const Register Rflags = R4_tmp; 4031 Label done; 4032 4033 // get receiver 4034 __ ldr(Robj, aaddress(0)); 4035 4036 // access constant pool cache 4037 __ get_cache_and_index_at_bcp(Rcache, Rindex, 2); 4038 __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 4039 __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())); 4040 4041 const bool gen_volatile_check = os::is_MP(); 4042 4043 if (gen_volatile_check) { 4044 // load flags to test volatile 4045 __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); 4046 } 4047 4048 // make sure exception is reported in correct bcp range (getfield is next instruction) 4049 __ add(Rbcp, Rbcp, 1); 4050 __ null_check(Robj, Rtemp); 4051 __ sub(Rbcp, Rbcp, 1); 4052 4053 #ifdef AARCH64 4054 if (gen_volatile_check) { 4055 Label notVolatile; 4056 __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 4057 4058 __ add(Rtemp, Robj, Roffset); 4059 4060 if (state == itos) { 4061 __ ldar_w(R0_tos, Rtemp); 4062 } else if (state == atos) { 4063 if (UseCompressedOops) { 4064 __ ldar_w(R0_tos, Rtemp); 4065 __ decode_heap_oop(R0_tos); 4066 } else { 4067 __ ldar(R0_tos, Rtemp); 4068 } 4069 __ verify_oop(R0_tos); 4070 } else if (state == ftos) { 4071 __ ldar_w(R0_tos, Rtemp); 4072 __ fmov_sw(S0_tos, R0_tos); 4073 } else { 4074 ShouldNotReachHere(); 4075 } 4076 __ b(done); 4077 4078 __ bind(notVolatile); 4079 } 4080 #endif // AARCH64 4081 4082 if (state == itos) { 4083 __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg); 4084 } else if (state == atos) { 4085 do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); 4086 __ verify_oop(R0_tos); 4087 } else if (state == ftos) { 4088 #ifdef AARCH64 4089 __ ldr_s(S0_tos, Address(Robj, Roffset)); 4090 #else 4091 #ifdef __SOFTFP__ 4092 __ ldr(R0_tos, Address(Robj, Roffset)); 4093 #else 4094 __ access_load_at(T_FLOAT, IN_HEAP, Address(Robj, Roffset), noreg /* ftos */, noreg, noreg, noreg); 4095 #endif // __SOFTFP__ 4096 #endif // AARCH64 4097 } else { 4098 ShouldNotReachHere(); 4099 } 4100 4101 #ifndef AARCH64 4102 if (gen_volatile_check) { 4103 // 
Check for volatile load 4104 Label notVolatile; 4105 __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 4106 4107 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); 4108 4109 __ bind(notVolatile); 4110 } 4111 #endif // !AARCH64 4112 4113 __ bind(done); 4114 } 4115 4116 4117 4118 //---------------------------------------------------------------------------------------------------- 4119 // Calls 4120 4121 void TemplateTable::count_calls(Register method, Register temp) { 4122 // implemented elsewhere 4123 ShouldNotReachHere(); 4124 } 4125 4126 4127 void TemplateTable::prepare_invoke(int byte_no, 4128 Register method, // linked method (or i-klass) 4129 Register index, // itable index, MethodType, etc. 4130 Register recv, // if caller wants to see it 4131 Register flags // if caller wants to test it 4132 ) { 4133 // determine flags 4134 const Bytecodes::Code code = bytecode(); 4135 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 4136 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 4137 const bool is_invokehandle = code == Bytecodes::_invokehandle; 4138 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 4139 const bool is_invokespecial = code == Bytecodes::_invokespecial; 4140 const bool load_receiver = (recv != noreg); 4141 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 4142 assert(recv == noreg || recv == R2, ""); 4143 assert(flags == noreg || flags == R3, ""); 4144 4145 // setup registers & access constant pool cache 4146 if (recv == noreg) recv = R2; 4147 if (flags == noreg) flags = R3; 4148 const Register temp = Rtemp; 4149 const Register ret_type = R1_tmp; 4150 assert_different_registers(method, index, flags, recv, LR, ret_type, temp); 4151 4152 // save 'interpreter return address' 4153 __ save_bcp(); 4154 4155 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); 4156 4157 // maybe push extra argument 4158 if (is_invokedynamic || is_invokehandle) { 4159 Label L_no_push; 4160 __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push); 4161 __ mov(temp, index); 4162 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0"); 4163 __ load_resolved_reference_at_index(index, temp); 4164 __ verify_oop(index); 4165 __ push_ptr(index); // push appendix (MethodType, CallSite, etc.) 
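// The appendix just pushed was loaded from the constant pool's resolved
// references array and acts as an extra trailing argument; the receiver
// load below happens after this push so that the parameter size used to
// locate the receiver is correct.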
4166 __ bind(L_no_push); 4167 } 4168 4169 // load receiver if needed (after extra argument is pushed so parameter size is correct) 4170 if (load_receiver) { 4171 __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask); // get parameter size 4172 Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv); 4173 __ ldr(recv, recv_addr); 4174 __ verify_oop(recv); 4175 } 4176 4177 // compute return type 4178 __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift); 4179 // Make sure we don't need to mask flags after the above shift 4180 ConstantPoolCacheEntry::verify_tos_state_shift(); 4181 // load return address 4182 { const address table = (address) Interpreter::invoke_return_entry_table_for(code); 4183 __ mov_slow(temp, table); 4184 __ ldr(LR, Address::indexed_ptr(temp, ret_type)); 4185 } 4186 } 4187 4188 4189 void TemplateTable::invokevirtual_helper(Register index, 4190 Register recv, 4191 Register flags) { 4192 4193 const Register recv_klass = R2_tmp; 4194 4195 assert_different_registers(index, recv, flags, Rtemp); 4196 assert_different_registers(index, recv_klass, R0_tmp, Rtemp); 4197 4198 // Test for an invoke of a final method 4199 Label notFinal; 4200 __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal); 4201 4202 assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention"); 4203 4204 // do the call - the index is actually the method to call 4205 4206 // It's final, need a null check here! 4207 __ null_check(recv, Rtemp); 4208 4209 // profile this call 4210 __ profile_final_call(R0_tmp); 4211 4212 __ jump_from_interpreted(Rmethod); 4213 4214 __ bind(notFinal); 4215 4216 // get receiver klass 4217 __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes()); 4218 __ load_klass(recv_klass, recv); 4219 4220 // profile this call 4221 __ profile_virtual_call(R0_tmp, recv_klass); 4222 4223 // get target Method* & entry point 4224 const int base = in_bytes(Klass::vtable_start_offset()); 4225 assert(vtableEntry::size() == 1, "adjust the scaling in the code below"); 4226 __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize)); 4227 __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes())); 4228 __ jump_from_interpreted(Rmethod); 4229 } 4230 4231 void TemplateTable::invokevirtual(int byte_no) { 4232 transition(vtos, vtos); 4233 assert(byte_no == f2_byte, "use this argument"); 4234 4235 const Register Rrecv = R2_tmp; 4236 const Register Rflags = R3_tmp; 4237 4238 prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags); 4239 4240 // Rmethod: index 4241 // Rrecv: receiver 4242 // Rflags: flags 4243 // LR: return address 4244 4245 invokevirtual_helper(Rmethod, Rrecv, Rflags); 4246 } 4247 4248 4249 void TemplateTable::invokespecial(int byte_no) { 4250 transition(vtos, vtos); 4251 assert(byte_no == f1_byte, "use this argument"); 4252 const Register Rrecv = R2_tmp; 4253 prepare_invoke(byte_no, Rmethod, noreg, Rrecv); 4254 __ verify_oop(Rrecv); 4255 __ null_check(Rrecv, Rtemp); 4256 // do the call 4257 __ profile_call(Rrecv); 4258 __ jump_from_interpreted(Rmethod); 4259 } 4260 4261 4262 void TemplateTable::invokestatic(int byte_no) { 4263 transition(vtos, vtos); 4264 assert(byte_no == f1_byte, "use this argument"); 4265 prepare_invoke(byte_no, Rmethod); 4266 // do the call 4267 __ profile_call(R2_tmp); 4268 __ jump_from_interpreted(Rmethod); 4269 } 4270 4271 4272 void TemplateTable::fast_invokevfinal(int byte_no) { 4273 transition(vtos, vtos); 4274 assert(byte_no == f2_byte, "use 
this argument"); 4275 __ stop("fast_invokevfinal is not used on ARM"); 4276 } 4277 4278 4279 void TemplateTable::invokeinterface(int byte_no) { 4280 transition(vtos, vtos); 4281 assert(byte_no == f1_byte, "use this argument"); 4282 4283 const Register Ritable = R1_tmp; 4284 const Register Rrecv = R2_tmp; 4285 const Register Rinterf = R5_tmp; 4286 const Register Rindex = R4_tmp; 4287 const Register Rflags = R3_tmp; 4288 const Register Rklass = R2_tmp; // Note! Same register with Rrecv 4289 4290 prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags); 4291 4292 // First check for Object case, then private interface method, 4293 // then regular interface method. 4294 4295 // Special case of invokeinterface called for virtual method of 4296 // java.lang.Object. See cpCache.cpp for details. 4297 Label notObjectMethod; 4298 __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod); 4299 invokevirtual_helper(Rmethod, Rrecv, Rflags); 4300 __ bind(notObjectMethod); 4301 4302 // Get receiver klass into Rklass - also a null check 4303 __ load_klass(Rklass, Rrecv); 4304 4305 // Check for private method invocation - indicated by vfinal 4306 Label no_such_interface; 4307 4308 Label notVFinal; 4309 __ tbz(Rflags, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal); 4310 4311 Label subtype; 4312 __ check_klass_subtype(Rklass, Rinterf, R1_tmp, R3_tmp, noreg, subtype); 4313 // If we get here the typecheck failed 4314 __ b(no_such_interface); 4315 __ bind(subtype); 4316 4317 // do the call 4318 __ profile_final_call(R0_tmp); 4319 __ jump_from_interpreted(Rmethod); 4320 4321 __ bind(notVFinal); 4322 4323 // Receiver subtype check against REFC. 4324 __ lookup_interface_method(// inputs: rec. class, interface 4325 Rklass, Rinterf, noreg, 4326 // outputs: scan temp. reg1, scan temp. reg2 4327 noreg, Ritable, Rtemp, 4328 no_such_interface); 4329 4330 // profile this call 4331 __ profile_virtual_call(R0_tmp, Rklass); 4332 4333 // Get declaring interface class from method 4334 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); 4335 __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset())); 4336 __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes())); 4337 4338 // Get itable index from method 4339 __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset())); 4340 __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32 4341 __ neg(Rindex, Rtemp); 4342 4343 __ lookup_interface_method(// inputs: rec. class, interface 4344 Rklass, Rinterf, Rindex, 4345 // outputs: scan temp. reg1, scan temp. reg2 4346 Rmethod, Ritable, Rtemp, 4347 no_such_interface); 4348 4349 // Rmethod: Method* to call 4350 4351 // Check for abstract method error 4352 // Note: This should be done more efficiently via a throw_abstract_method_error 4353 // interpreter entry point and a conditional jump to it in case of a null 4354 // method. 4355 { Label L; 4356 __ cbnz(Rmethod, L); 4357 // throw exception 4358 // note: must restore interpreter registers to canonical 4359 // state for exception handling to work correctly! 4360 __ restore_method(); 4361 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); 4362 // the call_VM checks for exception, so we should never return here. 
4363 __ should_not_reach_here(); 4364 __ bind(L); 4365 } 4366 4367 // do the call 4368 __ jump_from_interpreted(Rmethod); 4369 4370 // throw exception 4371 __ bind(no_such_interface); 4372 __ restore_method(); 4373 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError)); 4374 // the call_VM checks for exception, so we should never return here. 4375 __ should_not_reach_here(); 4376 } 4377 4378 void TemplateTable::invokehandle(int byte_no) { 4379 transition(vtos, vtos); 4380 4381 // TODO-AARCH64 review register usage 4382 const Register Rrecv = R2_tmp; 4383 const Register Rmtype = R4_tmp; 4384 const Register R5_method = R5_tmp; // can't reuse Rmethod! 4385 4386 prepare_invoke(byte_no, R5_method, Rmtype, Rrecv); 4387 __ null_check(Rrecv, Rtemp); 4388 4389 // Rmtype: MethodType object (from cpool->resolved_references[f1], if necessary) 4390 // Rmethod: MH.invokeExact_MT method (from f2) 4391 4392 // Note: Rmtype is already pushed (if necessary) by prepare_invoke 4393 4394 // do the call 4395 __ profile_final_call(R3_tmp); // FIXME: profile the LambdaForm also 4396 __ mov(Rmethod, R5_method); 4397 __ jump_from_interpreted(Rmethod); 4398 } 4399 4400 void TemplateTable::invokedynamic(int byte_no) { 4401 transition(vtos, vtos); 4402 4403 // TODO-AARCH64 review register usage 4404 const Register Rcallsite = R4_tmp; 4405 const Register R5_method = R5_tmp; // can't reuse Rmethod! 4406 4407 prepare_invoke(byte_no, R5_method, Rcallsite); 4408 4409 // Rcallsite: CallSite object (from cpool->resolved_references[f1]) 4410 // Rmethod: MH.linkToCallSite method (from f2) 4411 4412 // Note: Rcallsite is already pushed by prepare_invoke 4413 4414 if (ProfileInterpreter) { 4415 __ profile_call(R2_tmp); 4416 } 4417 4418 // do the call 4419 __ mov(Rmethod, R5_method); 4420 __ jump_from_interpreted(Rmethod); 4421 } 4422 4423 //---------------------------------------------------------------------------------------------------- 4424 // Allocation 4425 4426 void TemplateTable::_new() { 4427 transition(vtos, atos); 4428 4429 const Register Robj = R0_tos; 4430 const Register Rcpool = R1_tmp; 4431 const Register Rindex = R2_tmp; 4432 const Register Rtags = R3_tmp; 4433 const Register Rsize = R3_tmp; 4434 4435 Register Rklass = R4_tmp; 4436 assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp); 4437 assert_different_registers(Rcpool, Rindex, Rklass, Rsize); 4438 4439 Label slow_case; 4440 Label done; 4441 Label initialize_header; 4442 Label initialize_object; // including clearing the fields 4443 4444 const bool allow_shared_alloc = 4445 Universe::heap()->supports_inline_contig_alloc(); 4446 4447 // Literals 4448 InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL); 4449 4450 __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); 4451 __ get_cpool_and_tags(Rcpool, Rtags); 4452 4453 // Make sure the class we're about to instantiate has been resolved. 
4454 // This is done before loading the InstanceKlass to be consistent with the order 4455 // in which the ConstantPool is updated (see ConstantPool::klass_at_put) 4456 const int tags_offset = Array<u1>::base_offset_in_bytes(); 4457 __ add(Rtemp, Rtags, Rindex); 4458 4459 #ifdef AARCH64 4460 __ add(Rtemp, Rtemp, tags_offset); 4461 __ ldarb(Rtemp, Rtemp); 4462 #else 4463 __ ldrb(Rtemp, Address(Rtemp, tags_offset)); 4464 4465 // use Rklass as a scratch 4466 volatile_barrier(MacroAssembler::LoadLoad, Rklass); 4467 #endif // AARCH64 4468 4469 // get InstanceKlass 4470 __ cmp(Rtemp, JVM_CONSTANT_Class); 4471 __ b(slow_case, ne); 4472 __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass); 4473 4474 // make sure klass is initialized & doesn't have a finalizer 4475 // first, check that klass is fully initialized 4476 __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset())); 4477 __ cmp(Rtemp, InstanceKlass::fully_initialized); 4478 __ b(slow_case, ne); 4479 4480 // get instance_size in InstanceKlass (scaled to a count of bytes) 4481 __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset())); 4482 4483 // test to see if it has a finalizer or is malformed in some way 4484 // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number 4485 __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case); 4486 4487 // Allocate the instance: 4488 // If TLAB is enabled: 4489 // Try to allocate in the TLAB. 4490 // If it fails, go to the slow path. 4491 // Else If inline contiguous allocations are enabled: 4492 // Try to allocate in eden. 4493 // If it fails due to reaching the heap end, go to the slow path. 4494 // 4495 // If TLAB is enabled OR inline contiguous is enabled: 4496 // Initialize the allocation. 4497 // Exit. 4498 // 4499 // Go to slow path. 4500 if (UseTLAB) { 4501 const Register Rtlab_top = R1_tmp; 4502 const Register Rtlab_end = R2_tmp; 4503 assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end); 4504 4505 __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset())); 4506 __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset()))); 4507 __ add(Rtlab_top, Robj, Rsize); 4508 __ cmp(Rtlab_top, Rtlab_end); 4509 __ b(slow_case, hi); 4510 __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset())); 4511 if (ZeroTLAB) { 4512 // the fields have been already cleared 4513 __ b(initialize_header); 4514 } else { 4515 // initialize both the header and fields 4516 __ b(initialize_object); 4517 } 4518 } else { 4519 // Allocation in the shared Eden, if allowed. 4520 if (allow_shared_alloc) { 4521 const Register Rheap_top_addr = R2_tmp; 4522 const Register Rheap_top = R5_tmp; 4523 const Register Rheap_end = Rtemp; 4524 assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR); 4525 4526 // heap_end is (re)loaded in the loop since it is also used as a scratch register in the CAS 4527 __ ldr_literal(Rheap_top_addr, Lheap_top_addr); 4528 4529 Label retry; 4530 __ bind(retry); 4531 4532 #ifdef AARCH64 4533 __ ldxr(Robj, Rheap_top_addr); 4534 #else 4535 __ ldr(Robj, Address(Rheap_top_addr)); 4536 #endif // AARCH64 4537 4538 __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr())); 4539 __ add(Rheap_top, Robj, Rsize); 4540 __ cmp(Rheap_top, Rheap_end); 4541 __ b(slow_case, hi); 4542 4543 // Update heap top atomically. 4544 // If someone beats us on the allocation, try again, otherwise continue.
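// A C-like sketch of this retry loop (illustration only; CAS here stands
// for the ldxr/stxr or atomic_cas_bool sequence emitted below):
//   do {
//     obj     = *heap_top_addr;                 // current heap top
//     new_top = obj + size;
//     if (new_top > heap_end) goto slow_case;   // not enough space
//   } while (!CAS(heap_top_addr, obj, new_top));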
4545 #ifdef AARCH64 4546 __ stxr(Rtemp2, Rheap_top, Rheap_top_addr); 4547 __ cbnz_w(Rtemp2, retry); 4548 #else 4549 __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/); 4550 __ b(retry, ne); 4551 #endif // AARCH64 4552 4553 __ incr_allocated_bytes(Rsize, Rtemp); 4554 } 4555 } 4556 4557 if (UseTLAB || allow_shared_alloc) { 4558 const Register Rzero0 = R1_tmp; 4559 const Register Rzero1 = R2_tmp; 4560 const Register Rzero_end = R5_tmp; 4561 const Register Rzero_cur = Rtemp; 4562 assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end); 4563 4564 // The object is initialized before the header. If the object size is 4565 // zero, go directly to the header initialization. 4566 __ bind(initialize_object); 4567 __ subs(Rsize, Rsize, sizeof(oopDesc)); 4568 __ add(Rzero_cur, Robj, sizeof(oopDesc)); 4569 __ b(initialize_header, eq); 4570 4571 #ifdef ASSERT 4572 // make sure Rsize is a multiple of 8 4573 Label L; 4574 __ tst(Rsize, 0x07); 4575 __ b(L, eq); 4576 __ stop("object size is not multiple of 8 - adjust this code"); 4577 __ bind(L); 4578 #endif 4579 4580 #ifdef AARCH64 4581 { 4582 Label loop; 4583 // Step back by 1 word if object size is not a multiple of 2*wordSize. 4584 assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word"); 4585 __ andr(Rtemp2, Rsize, (uintx)wordSize); 4586 __ sub(Rzero_cur, Rzero_cur, Rtemp2); 4587 4588 // Zero by 2 words per iteration. 4589 __ bind(loop); 4590 __ subs(Rsize, Rsize, 2*wordSize); 4591 __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed)); 4592 __ b(loop, gt); 4593 } 4594 #else 4595 __ mov(Rzero0, 0); 4596 __ mov(Rzero1, 0); 4597 __ add(Rzero_end, Rzero_cur, Rsize); 4598 4599 // initialize remaining object fields: Rsize was a multiple of 8 4600 { Label loop; 4601 // loop is unrolled 2 times 4602 __ bind(loop); 4603 // #1 4604 __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback); 4605 __ cmp(Rzero_cur, Rzero_end); 4606 // #2 4607 __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne); 4608 __ cmp(Rzero_cur, Rzero_end, ne); 4609 __ b(loop, ne); 4610 } 4611 #endif // AARCH64 4612 4613 // initialize object header only. 
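// The mark word receives either the klass prototype header (when biased
// locking is in use) or the default markOopDesc::prototype(); afterwards
// the klass pointer (plus the klass gap on AArch64) is stored.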
4614 __ bind(initialize_header); 4615 if (UseBiasedLocking) { 4616 __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset())); 4617 } else { 4618 __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype()); 4619 } 4620 // mark 4621 __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes())); 4622 4623 // klass 4624 #ifdef AARCH64 4625 __ store_klass_gap(Robj); 4626 #endif // AARCH64 4627 __ store_klass(Rklass, Robj); // blows Rklass: 4628 Rklass = noreg; 4629 4630 // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation 4631 if (DTraceAllocProbes) { 4632 // Trigger dtrace event for fastpath 4633 Label Lcontinue; 4634 4635 __ ldrb_global(Rtemp, (address)&DTraceAllocProbes); 4636 __ cbz(Rtemp, Lcontinue); 4637 4638 __ push(atos); 4639 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj); 4640 __ pop(atos); 4641 4642 __ bind(Lcontinue); 4643 } 4644 4645 __ b(done); 4646 } else { 4647 // jump over literals 4648 __ b(slow_case); 4649 } 4650 4651 if (allow_shared_alloc) { 4652 __ bind_literal(Lheap_top_addr); 4653 } 4654 4655 // slow case 4656 __ bind(slow_case); 4657 __ get_constant_pool(Rcpool); 4658 __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); 4659 __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex); 4660 4661 // continue 4662 __ bind(done); 4663 4664 // StoreStore barrier required after complete initialization 4665 // (headers + content zeroing), before the object may escape. 4666 __ membar(MacroAssembler::StoreStore, R1_tmp); 4667 } 4668 4669 4670 void TemplateTable::newarray() { 4671 transition(itos, atos); 4672 __ ldrb(R1, at_bcp(1)); 4673 __ mov(R2, R0_tos); 4674 call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2); 4675 // MacroAssembler::StoreStore useless (included in the runtime exit path) 4676 } 4677 4678 4679 void TemplateTable::anewarray() { 4680 transition(itos, atos); 4681 __ get_unsigned_2_byte_index_at_bcp(R2, 1); 4682 __ get_constant_pool(R1); 4683 __ mov(R3, R0_tos); 4684 call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3); 4685 // MacroAssembler::StoreStore useless (included in the runtime exit path) 4686 } 4687 4688 4689 void TemplateTable::arraylength() { 4690 transition(atos, itos); 4691 __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes()); 4692 __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes())); 4693 } 4694 4695 4696 void TemplateTable::checkcast() { 4697 transition(atos, atos); 4698 Label done, is_null, quicked, resolved, throw_exception; 4699 4700 const Register Robj = R0_tos; 4701 const Register Rcpool = R2_tmp; 4702 const Register Rtags = R3_tmp; 4703 const Register Rindex = R4_tmp; 4704 const Register Rsuper = R3_tmp; 4705 const Register Rsub = R4_tmp; 4706 const Register Rsubtype_check_tmp1 = R1_tmp; 4707 const Register Rsubtype_check_tmp2 = LR_tmp; 4708 4709 __ cbz(Robj, is_null); 4710 4711 // Get cpool & tags index 4712 __ get_cpool_and_tags(Rcpool, Rtags); 4713 __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); 4714 4715 // See if bytecode has already been quicked 4716 __ add(Rtemp, Rtags, Rindex); 4717 #ifdef AARCH64 4718 // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough 4719 __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes()); 4720 __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier 4721 #else 4722 __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes())); 4723 #endif // AARCH64 4724 4725 __ 
cmp(Rtemp, JVM_CONSTANT_Class); 4726 4727 #ifndef AARCH64 4728 volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true); 4729 #endif // !AARCH64 4730 4731 __ b(quicked, eq); 4732 4733 __ push(atos); 4734 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 4735 // vm_result_2 has metadata result 4736 __ get_vm_result_2(Rsuper, Robj); 4737 __ pop_ptr(Robj); 4738 __ b(resolved); 4739 4740 __ bind(throw_exception); 4741 // Come here on failure of subtype check 4742 __ profile_typecheck_failed(R1_tmp); 4743 __ mov(R2_ClassCastException_obj, Robj); // convention with generate_ClassCastException_handler() 4744 __ b(Interpreter::_throw_ClassCastException_entry); 4745 4746 // Get superklass in Rsuper and subklass in Rsub 4747 __ bind(quicked); 4748 __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper); 4749 4750 __ bind(resolved); 4751 __ load_klass(Rsub, Robj); 4752 4753 // Generate subtype check. Blows both tmps and Rtemp. 4754 assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp); 4755 __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2); 4756 4757 // Come here on success 4758 4759 // Collect counts on whether this check-cast sees NULLs a lot or not. 4760 if (ProfileInterpreter) { 4761 __ b(done); 4762 __ bind(is_null); 4763 __ profile_null_seen(R1_tmp); 4764 } else { 4765 __ bind(is_null); // same as 'done' 4766 } 4767 __ bind(done); 4768 } 4769 4770 4771 void TemplateTable::instanceof() { 4772 // result = 0: obj == NULL or obj is not an instanceof the specified klass 4773 // result = 1: obj != NULL and obj is an instanceof the specified klass 4774 4775 transition(atos, itos); 4776 Label done, is_null, not_subtype, quicked, resolved; 4777 4778 const Register Robj = R0_tos; 4779 const Register Rcpool = R2_tmp; 4780 const Register Rtags = R3_tmp; 4781 const Register Rindex = R4_tmp; 4782 const Register Rsuper = R3_tmp; 4783 const Register Rsub = R4_tmp; 4784 const Register Rsubtype_check_tmp1 = R0_tmp; 4785 const Register Rsubtype_check_tmp2 = R1_tmp; 4786 4787 __ cbz(Robj, is_null); 4788 4789 __ load_klass(Rsub, Robj); 4790 4791 // Get cpool & tags index 4792 __ get_cpool_and_tags(Rcpool, Rtags); 4793 __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); 4794 4795 // See if bytecode has already been quicked 4796 __ add(Rtemp, Rtags, Rindex); 4797 #ifdef AARCH64 4798 // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough 4799 __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes()); 4800 __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier 4801 #else 4802 __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes())); 4803 #endif // AARCH64 4804 __ cmp(Rtemp, JVM_CONSTANT_Class); 4805 4806 #ifndef AARCH64 4807 volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true); 4808 #endif // !AARCH64 4809 4810 __ b(quicked, eq); 4811 4812 __ push(atos); 4813 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 4814 // vm_result_2 has metadata result 4815 __ get_vm_result_2(Rsuper, Robj); 4816 __ pop_ptr(Robj); 4817 __ b(resolved); 4818 4819 // Get superklass in Rsuper and subklass in Rsub 4820 __ bind(quicked); 4821 __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper); 4822 4823 __ bind(resolved); 4824 __ load_klass(Rsub, Robj); 4825 4826 // Generate subtype check. Blows both tmps and Rtemp. 
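// gen_subtype_check() falls through on success and branches to
// not_subtype on failure; the two paths below just materialize the 1/0
// result that instanceof pushes.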
4827 __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2); 4828 4829 // Come here on success 4830 __ mov(R0_tos, 1); 4831 __ b(done); 4832 4833 __ bind(not_subtype); 4834 // Come here on failure 4835 __ profile_typecheck_failed(R1_tmp); 4836 __ mov(R0_tos, 0); 4837 4838 // Collect counts on whether this test sees NULLs a lot or not. 4839 if (ProfileInterpreter) { 4840 __ b(done); 4841 __ bind(is_null); 4842 __ profile_null_seen(R1_tmp); 4843 } else { 4844 __ bind(is_null); // same as 'done' 4845 } 4846 __ bind(done); 4847 } 4848 4849 4850 //---------------------------------------------------------------------------------------------------- 4851 // Breakpoints 4852 void TemplateTable::_breakpoint() { 4853 4854 // Note: We get here even if we are single stepping. 4855 // jbug insists on setting breakpoints at every bytecode 4856 // even if we are in single step mode. 4857 4858 transition(vtos, vtos); 4859 4860 // get the unpatched byte code 4861 __ mov(R1, Rmethod); 4862 __ mov(R2, Rbcp); 4863 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2); 4864 #ifdef AARCH64 4865 __ sxtw(Rtmp_save0, R0); 4866 #else 4867 __ mov(Rtmp_save0, R0); 4868 #endif // AARCH64 4869 4870 // post the breakpoint event 4871 __ mov(R1, Rmethod); 4872 __ mov(R2, Rbcp); 4873 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2); 4874 4875 // complete the execution of original bytecode 4876 __ mov(R3_bytecode, Rtmp_save0); 4877 __ dispatch_only_normal(vtos); 4878 } 4879 4880 4881 //---------------------------------------------------------------------------------------------------- 4882 // Exceptions 4883 4884 void TemplateTable::athrow() { 4885 transition(atos, vtos); 4886 __ mov(Rexception_obj, R0_tos); 4887 __ null_check(Rexception_obj, Rtemp); 4888 __ b(Interpreter::throw_exception_entry()); 4889 } 4890 4891 4892 //---------------------------------------------------------------------------------------------------- 4893 // Synchronization 4894 // 4895 // Note: monitorenter & exit are symmetric routines, which is reflected 4896 // in the assembly code structure as well 4897 // 4898 // Stack layout: 4899 // 4900 // [expressions ] <--- Rstack_top = expression stack top 4901 // .. 4902 // [expressions ] 4903 // [monitor entry] <--- monitor block top = expression stack bot 4904 // .. 4905 // [monitor entry] 4906 // [frame data ] <--- monitor block bot 4907 // ...
4908 // [saved FP ] <--- FP 4909 4910 4911 void TemplateTable::monitorenter() { 4912 transition(atos, vtos); 4913 4914 const Register Robj = R0_tos; 4915 const Register Rentry = R1_tmp; 4916 4917 // check for NULL object 4918 __ null_check(Robj, Rtemp); 4919 4920 const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize); 4921 assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment"); 4922 Label allocate_monitor, allocated; 4923 4924 // initialize entry pointer 4925 __ mov(Rentry, 0); // points to free slot or NULL 4926 4927 // find a free slot in the monitor block (result in Rentry) 4928 { Label loop, exit; 4929 const Register Rcur = R2_tmp; 4930 const Register Rcur_obj = Rtemp; 4931 const Register Rbottom = R3_tmp; 4932 assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj); 4933 4934 __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 4935 // points to current entry, starting with top-most entry 4936 __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize); 4937 // points to word before bottom of monitor block 4938 4939 __ cmp(Rcur, Rbottom); // check if there are no monitors 4940 #ifndef AARCH64 4941 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); 4942 // prefetch monitor's object for the first iteration 4943 #endif // !AARCH64 4944 __ b(allocate_monitor, eq); // there are no monitors, skip searching 4945 4946 __ bind(loop); 4947 #ifdef AARCH64 4948 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes())); 4949 #endif // AARCH64 4950 __ cmp(Rcur_obj, 0); // check if current entry is used 4951 __ mov(Rentry, Rcur, eq); // if not used then remember entry 4952 4953 __ cmp(Rcur_obj, Robj); // check if current entry is for same object 4954 __ b(exit, eq); // if same object then stop searching 4955 4956 __ add(Rcur, Rcur, entry_size); // otherwise advance to next entry 4957 4958 __ cmp(Rcur, Rbottom); // check if bottom reached 4959 #ifndef AARCH64 4960 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); 4961 // prefetch monitor's object for the next iteration 4962 #endif // !AARCH64 4963 __ b(loop, ne); // if not at bottom then check this entry 4964 __ bind(exit); 4965 } 4966 4967 __ cbnz(Rentry, allocated); // check if a slot has been found; if found, continue with that one 4968 4969 __ bind(allocate_monitor); 4970 4971 // allocate one if there's no free slot 4972 { Label loop; 4973 assert_different_registers(Robj, Rentry, R2_tmp, Rtemp); 4974 4975 // 1. compute new pointers 4976 4977 #ifdef AARCH64 4978 __ check_extended_sp(Rtemp); 4979 __ sub(SP, SP, entry_size); // adjust extended SP 4980 __ mov(Rtemp, SP); 4981 __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize)); 4982 #endif // AARCH64 4983 4984 __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 4985 // old monitor block top / expression stack bottom 4986 4987 __ sub(Rstack_top, Rstack_top, entry_size); // move expression stack top 4988 __ check_stack_top_on_expansion(); 4989 4990 __ sub(Rentry, Rentry, entry_size); // move expression stack bottom 4991 4992 __ mov(R2_tmp, Rstack_top); // set start value for copy loop 4993 4994 __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 4995 // set new monitor block top 4996 4997 // 2. 
move expression stack contents 4998 4999 __ cmp(R2_tmp, Rentry); // check if expression stack is empty 5000 #ifndef AARCH64 5001 __ ldr(Rtemp, Address(R2_tmp, entry_size), ne); // load expression stack word from old location 5002 #endif // !AARCH64 5003 __ b(allocated, eq); 5004 5005 __ bind(loop); 5006 #ifdef AARCH64 5007 __ ldr(Rtemp, Address(R2_tmp, entry_size)); // load expression stack word from old location 5008 #endif // AARCH64 5009 __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location 5010 // and advance to next word 5011 __ cmp(R2_tmp, Rentry); // check if bottom reached 5012 #ifndef AARCH64 5013 __ ldr(Rtemp, Address(R2, entry_size), ne); // load expression stack word from old location 5014 #endif // !AARCH64 5015 __ b(loop, ne); // if not at bottom then copy next word 5016 } 5017 5018 // call run-time routine 5019 5020 // Rentry: points to monitor entry 5021 __ bind(allocated); 5022 5023 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly. 5024 // The object has already been popped from the stack, so the expression stack looks correct. 5025 __ add(Rbcp, Rbcp, 1); 5026 5027 __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes())); // store object 5028 __ lock_object(Rentry); 5029 5030 // check to make sure this monitor doesn't cause stack overflow after locking 5031 __ save_bcp(); // in case of exception 5032 __ arm_stack_overflow_check(0, Rtemp); 5033 5034 // The bcp has already been incremented. Just need to dispatch to next instruction. 5035 __ dispatch_next(vtos); 5036 } 5037 5038 5039 void TemplateTable::monitorexit() { 5040 transition(atos, vtos); 5041 5042 const Register Robj = R0_tos; 5043 const Register Rcur = R1_tmp; 5044 const Register Rbottom = R2_tmp; 5045 const Register Rcur_obj = Rtemp; 5046 5047 // check for NULL object 5048 __ null_check(Robj, Rtemp); 5049 5050 const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize); 5051 Label found, throw_exception; 5052 5053 // find matching slot 5054 { Label loop; 5055 assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj); 5056 5057 __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 5058 // points to current entry, starting with top-most entry 5059 __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize); 5060 // points to word before bottom of monitor block 5061 5062 __ cmp(Rcur, Rbottom); // check if bottom reached 5063 #ifndef AARCH64 5064 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); 5065 // prefetch monitor's object for the first iteration 5066 #endif // !AARCH64 5067 __ b(throw_exception, eq); // throw exception if there are no monitors 5068 5069 __ bind(loop); 5070 #ifdef AARCH64 5071 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes())); 5072 #endif // AARCH64 5073 // check if current entry is for same object 5074 __ cmp(Rcur_obj, Robj); 5075 __ b(found, eq); // if same object then stop searching 5076 __ add(Rcur, Rcur, entry_size); // otherwise advance to next entry 5077 __ cmp(Rcur, Rbottom); // check if bottom reached 5078 #ifndef AARCH64 5079 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); 5080 #endif // !AARCH64 5081 __ b(loop, ne); // if not at bottom then check this entry 5082 } 5083 5084 // error handling.
Unlocking was not block-structured 5085 __ bind(throw_exception); 5086 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); 5087 __ should_not_reach_here(); 5088 5089 // call run-time routine 5090 // Rcur: points to monitor entry 5091 __ bind(found); 5092 __ push_ptr(Robj); // make sure object is on stack (contract with oopMaps) 5093 __ unlock_object(Rcur); 5094 __ pop_ptr(Robj); // discard object 5095 } 5096 5097 5098 //---------------------------------------------------------------------------------------------------- 5099 // Wide instructions 5100 5101 void TemplateTable::wide() { 5102 transition(vtos, vtos); 5103 __ ldrb(R3_bytecode, at_bcp(1)); 5104 5105 InlinedAddress Ltable((address)Interpreter::_wentry_point); 5106 __ ldr_literal(Rtemp, Ltable); 5107 __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp); 5108 5109 __ nop(); // to avoid filling CPU pipeline with invalid instructions 5110 __ nop(); 5111 __ bind_literal(Ltable); 5112 } 5113 5114 5115 //---------------------------------------------------------------------------------------------------- 5116 // Multi arrays 5117 5118 void TemplateTable::multianewarray() { 5119 transition(vtos, atos); 5120 __ ldrb(Rtmp_save0, at_bcp(3)); // get number of dimensions 5121 5122 // last dim is on top of stack; we want address of first one: 5123 // first_addr = last_addr + ndims * stackElementSize - 1*wordSize 5124 // subtracting the final wordSize makes R1 point to the beginning of the array. 5125 __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize)); 5126 __ sub(R1, Rtemp, wordSize); 5127 5128 call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1); 5129 __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize)); 5130 // MacroAssembler::StoreStore useless (included in the runtime exit path) 5131 }
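// Worked example for the address computation in multianewarray, assuming
// 32-bit ARM where stackElementSize == wordSize == 4: with ndims == 2 and
// both dimension words on the expression stack,
//   R1 = Rstack_top + 2*4 - 4 = Rstack_top + 4
// which is the address of the first (deepest) dimension, matching the
// first_addr formula in the comment above.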