1 /* 2 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "interpreter/interp_masm.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "interpreter/interpreterRuntime.hpp" 30 #include "interpreter/templateTable.hpp" 31 #include "memory/universe.hpp" 32 #include "oops/cpCache.hpp" 33 #include "oops/methodData.hpp" 34 #include "oops/objArrayKlass.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "prims/methodHandles.hpp" 37 #include "runtime/frame.inline.hpp" 38 #include "runtime/sharedRuntime.hpp" 39 #include "runtime/stubRoutines.hpp" 40 #include "runtime/synchronizer.hpp" 41 42 #define __ _masm-> 43 44 //---------------------------------------------------------------------------------------------------- 45 // Platform-dependent initialization 46 47 void TemplateTable::pd_initialize() { 48 // No arm specific initialization 49 } 50 51 //---------------------------------------------------------------------------------------------------- 52 // Address computation 53 54 // local variables 55 static inline Address iaddress(int n) { 56 return Address(Rlocals, Interpreter::local_offset_in_bytes(n)); 57 } 58 59 static inline Address laddress(int n) { return iaddress(n + 1); } 60 #ifndef AARCH64 61 static inline Address haddress(int n) { return iaddress(n + 0); } 62 #endif // !AARCH64 63 64 static inline Address faddress(int n) { return iaddress(n); } 65 static inline Address daddress(int n) { return laddress(n); } 66 static inline Address aaddress(int n) { return iaddress(n); } 67 68 69 void TemplateTable::get_local_base_addr(Register r, Register index) { 70 __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize)); 71 } 72 73 Address TemplateTable::load_iaddress(Register index, Register scratch) { 74 #ifdef AARCH64 75 get_local_base_addr(scratch, index); 76 return Address(scratch); 77 #else 78 return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset); 79 #endif // AARCH64 80 } 81 82 Address TemplateTable::load_aaddress(Register index, Register scratch) { 83 return load_iaddress(index, scratch); 84 } 85 86 Address TemplateTable::load_faddress(Register index, Register scratch) { 87 #ifdef __SOFTFP__ 88 return load_iaddress(index, scratch); 89 #else 90 get_local_base_addr(scratch, index); 91 return Address(scratch); 92 #endif // __SOFTFP__ 93 } 94 95 Address TemplateTable::load_daddress(Register index, Register scratch) { 96 get_local_base_addr(scratch, index); 97 return Address(scratch, Interpreter::local_offset_in_bytes(1)); 98 } 99 100 // At top of Java expression stack which may be different than SP. 101 // It isn't for category 1 objects. 102 static inline Address at_tos() { 103 return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0)); 104 } 105 106 static inline Address at_tos_p1() { 107 return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1)); 108 } 109 110 static inline Address at_tos_p2() { 111 return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2)); 112 } 113 114 115 // 32-bit ARM: 116 // Loads double/long local into R0_tos_lo/R1_tos_hi with two 117 // separate ldr instructions (supports nonadjacent values). 118 // Used for longs in all modes, and for doubles in SOFTFP mode. 119 // 120 // AArch64: loads long local into R0_tos. 121 // 122 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) { 123 const Register Rlocal_base = tmp; 124 assert_different_registers(Rlocal_index, tmp); 125 126 get_local_base_addr(Rlocal_base, Rlocal_index); 127 #ifdef AARCH64 128 __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1))); 129 #else 130 __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1))); 131 __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0))); 132 #endif // AARCH64 133 } 134 135 136 // 32-bit ARM: 137 // Stores R0_tos_lo/R1_tos_hi to double/long local with two 138 // separate str instructions (supports nonadjacent values). 139 // Used for longs in all modes, and for doubles in SOFTFP mode 140 // 141 // AArch64: stores R0_tos to long local. 142 // 143 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) { 144 const Register Rlocal_base = tmp; 145 assert_different_registers(Rlocal_index, tmp); 146 147 get_local_base_addr(Rlocal_base, Rlocal_index); 148 #ifdef AARCH64 149 __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1))); 150 #else 151 __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1))); 152 __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0))); 153 #endif // AARCH64 154 } 155 156 // Returns address of Java array element using temp register as address base. 157 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) { 158 int logElemSize = exact_log2(type2aelembytes(elemType)); 159 __ add_ptr_scaled_int32(temp, array, index, logElemSize); 160 return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType)); 161 } 162 163 //---------------------------------------------------------------------------------------------------- 164 // Condition conversion 165 AsmCondition convNegCond(TemplateTable::Condition cc) { 166 switch (cc) { 167 case TemplateTable::equal : return ne; 168 case TemplateTable::not_equal : return eq; 169 case TemplateTable::less : return ge; 170 case TemplateTable::less_equal : return gt; 171 case TemplateTable::greater : return le; 172 case TemplateTable::greater_equal: return lt; 173 } 174 ShouldNotReachHere(); 175 return nv; 176 } 177 178 //---------------------------------------------------------------------------------------------------- 179 // Miscelaneous helper routines 180 181 // Store an oop (or NULL) at the address described by obj. 182 // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR). 183 // Also destroys new_val and obj.base(). 184 static void do_oop_store(InterpreterMacroAssembler* _masm, 185 Address obj, 186 Register new_val, 187 Register tmp1, 188 Register tmp2, 189 Register tmp3, 190 BarrierSet::Name barrier, 191 bool precise, 192 bool is_null) { 193 194 assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg); 195 switch (barrier) { 196 #if INCLUDE_ALL_GCS 197 case BarrierSet::G1BarrierSet: 198 { 199 // flatten object address if needed 200 assert (obj.mode() == basic_offset, "pre- or post-indexing is not supported here"); 201 202 const Register store_addr = obj.base(); 203 if (obj.index() != noreg) { 204 assert (obj.disp() == 0, "index or displacement, not both"); 205 #ifdef AARCH64 206 __ add(store_addr, obj.base(), obj.index(), obj.extend(), obj.shift_imm()); 207 #else 208 assert(obj.offset_op() == add_offset, "addition is expected"); 209 __ add(store_addr, obj.base(), AsmOperand(obj.index(), obj.shift(), obj.shift_imm())); 210 #endif // AARCH64 211 } else if (obj.disp() != 0) { 212 __ add(store_addr, obj.base(), obj.disp()); 213 } 214 215 __ g1_write_barrier_pre(store_addr, new_val, tmp1, tmp2, tmp3); 216 if (is_null) { 217 __ store_heap_oop_null(new_val, Address(store_addr)); 218 } else { 219 // G1 barrier needs uncompressed oop for region cross check. 220 Register val_to_store = new_val; 221 if (UseCompressedOops) { 222 val_to_store = tmp1; 223 __ mov(val_to_store, new_val); 224 } 225 __ store_heap_oop(val_to_store, Address(store_addr)); // blows val_to_store: 226 val_to_store = noreg; 227 __ g1_write_barrier_post(store_addr, new_val, tmp1, tmp2, tmp3); 228 } 229 } 230 break; 231 case BarrierSet::Epsilon: 232 { 233 if (is_null) { 234 __ store_heap_oop_null(new_val, obj); 235 } else { 236 __ store_heap_oop(new_val, obj); // blows new_val: 237 new_val = noreg; 238 } 239 } 240 break; 241 #endif // INCLUDE_ALL_GCS 242 case BarrierSet::CardTableBarrierSet: 243 { 244 if (is_null) { 245 __ store_heap_oop_null(new_val, obj); 246 } else { 247 assert (!precise || (obj.index() == noreg && obj.disp() == 0), 248 "store check address should be calculated beforehand"); 249 250 __ store_check_part1(tmp1); 251 __ store_heap_oop(new_val, obj); // blows new_val: 252 new_val = noreg; 253 __ store_check_part2(obj.base(), tmp1, tmp2); 254 } 255 } 256 break; 257 case BarrierSet::ModRef: 258 ShouldNotReachHere(); 259 break; 260 default: 261 ShouldNotReachHere(); 262 break; 263 } 264 } 265 266 Address TemplateTable::at_bcp(int offset) { 267 assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); 268 return Address(Rbcp, offset); 269 } 270 271 272 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR. 273 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg, 274 Register temp_reg, bool load_bc_into_bc_reg/*=true*/, 275 int byte_no) { 276 assert_different_registers(bc_reg, temp_reg); 277 if (!RewriteBytecodes) return; 278 Label L_patch_done; 279 280 switch (bc) { 281 case Bytecodes::_fast_aputfield: 282 case Bytecodes::_fast_bputfield: 283 case Bytecodes::_fast_zputfield: 284 case Bytecodes::_fast_cputfield: 285 case Bytecodes::_fast_dputfield: 286 case Bytecodes::_fast_fputfield: 287 case Bytecodes::_fast_iputfield: 288 case Bytecodes::_fast_lputfield: 289 case Bytecodes::_fast_sputfield: 290 { 291 // We skip bytecode quickening for putfield instructions when 292 // the put_code written to the constant pool cache is zero. 293 // This is required so that every execution of this instruction 294 // calls out to InterpreterRuntime::resolve_get_put to do 295 // additional, required work. 296 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); 297 assert(load_bc_into_bc_reg, "we use bc_reg as temp"); 298 __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2)); 299 __ mov(bc_reg, bc); 300 __ cbz(temp_reg, L_patch_done); // test if bytecode is zero 301 } 302 break; 303 default: 304 assert(byte_no == -1, "sanity"); 305 // the pair bytecodes have already done the load. 306 if (load_bc_into_bc_reg) { 307 __ mov(bc_reg, bc); 308 } 309 } 310 311 if (__ can_post_breakpoint()) { 312 Label L_fast_patch; 313 // if a breakpoint is present we can't rewrite the stream directly 314 __ ldrb(temp_reg, at_bcp(0)); 315 __ cmp(temp_reg, Bytecodes::_breakpoint); 316 __ b(L_fast_patch, ne); 317 if (bc_reg != R3) { 318 __ mov(R3, bc_reg); 319 } 320 __ mov(R1, Rmethod); 321 __ mov(R2, Rbcp); 322 // Let breakpoint table handling rewrite to quicker bytecode 323 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3); 324 __ b(L_patch_done); 325 __ bind(L_fast_patch); 326 } 327 328 #ifdef ASSERT 329 Label L_okay; 330 __ ldrb(temp_reg, at_bcp(0)); 331 __ cmp(temp_reg, (int)Bytecodes::java_code(bc)); 332 __ b(L_okay, eq); 333 __ cmp(temp_reg, bc_reg); 334 __ b(L_okay, eq); 335 __ stop("patching the wrong bytecode"); 336 __ bind(L_okay); 337 #endif 338 339 // patch bytecode 340 __ strb(bc_reg, at_bcp(0)); 341 __ bind(L_patch_done); 342 } 343 344 //---------------------------------------------------------------------------------------------------- 345 // Individual instructions 346 347 void TemplateTable::nop() { 348 transition(vtos, vtos); 349 // nothing to do 350 } 351 352 void TemplateTable::shouldnotreachhere() { 353 transition(vtos, vtos); 354 __ stop("shouldnotreachhere bytecode"); 355 } 356 357 358 359 void TemplateTable::aconst_null() { 360 transition(vtos, atos); 361 __ mov(R0_tos, 0); 362 } 363 364 365 void TemplateTable::iconst(int value) { 366 transition(vtos, itos); 367 __ mov_slow(R0_tos, value); 368 } 369 370 371 void TemplateTable::lconst(int value) { 372 transition(vtos, ltos); 373 assert((value == 0) || (value == 1), "unexpected long constant"); 374 __ mov(R0_tos, value); 375 #ifndef AARCH64 376 __ mov(R1_tos_hi, 0); 377 #endif // !AARCH64 378 } 379 380 381 void TemplateTable::fconst(int value) { 382 transition(vtos, ftos); 383 #ifdef AARCH64 384 switch(value) { 385 case 0: __ fmov_sw(S0_tos, ZR); break; 386 case 1: __ fmov_s (S0_tos, 0x70); break; 387 case 2: __ fmov_s (S0_tos, 0x00); break; 388 default: ShouldNotReachHere(); break; 389 } 390 #else 391 const int zero = 0; // 0.0f 392 const int one = 0x3f800000; // 1.0f 393 const int two = 0x40000000; // 2.0f 394 395 switch(value) { 396 case 0: __ mov(R0_tos, zero); break; 397 case 1: __ mov(R0_tos, one); break; 398 case 2: __ mov(R0_tos, two); break; 399 default: ShouldNotReachHere(); break; 400 } 401 402 #ifndef __SOFTFP__ 403 __ fmsr(S0_tos, R0_tos); 404 #endif // !__SOFTFP__ 405 #endif // AARCH64 406 } 407 408 409 void TemplateTable::dconst(int value) { 410 transition(vtos, dtos); 411 #ifdef AARCH64 412 switch(value) { 413 case 0: __ fmov_dx(D0_tos, ZR); break; 414 case 1: __ fmov_d (D0_tos, 0x70); break; 415 default: ShouldNotReachHere(); break; 416 } 417 #else 418 const int one_lo = 0; // low part of 1.0 419 const int one_hi = 0x3ff00000; // high part of 1.0 420 421 if (value == 0) { 422 #ifdef __SOFTFP__ 423 __ mov(R0_tos_lo, 0); 424 __ mov(R1_tos_hi, 0); 425 #else 426 __ mov(R0_tmp, 0); 427 __ fmdrr(D0_tos, R0_tmp, R0_tmp); 428 #endif // __SOFTFP__ 429 } else if (value == 1) { 430 __ mov(R0_tos_lo, one_lo); 431 __ mov_slow(R1_tos_hi, one_hi); 432 #ifndef __SOFTFP__ 433 __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi); 434 #endif // !__SOFTFP__ 435 } else { 436 ShouldNotReachHere(); 437 } 438 #endif // AARCH64 439 } 440 441 442 void TemplateTable::bipush() { 443 transition(vtos, itos); 444 __ ldrsb(R0_tos, at_bcp(1)); 445 } 446 447 448 void TemplateTable::sipush() { 449 transition(vtos, itos); 450 __ ldrsb(R0_tmp, at_bcp(1)); 451 __ ldrb(R1_tmp, at_bcp(2)); 452 __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte)); 453 } 454 455 456 void TemplateTable::ldc(bool wide) { 457 transition(vtos, vtos); 458 Label fastCase, Done; 459 460 const Register Rindex = R1_tmp; 461 const Register Rcpool = R2_tmp; 462 const Register Rtags = R3_tmp; 463 const Register RtagType = R3_tmp; 464 465 if (wide) { 466 __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); 467 } else { 468 __ ldrb(Rindex, at_bcp(1)); 469 } 470 __ get_cpool_and_tags(Rcpool, Rtags); 471 472 const int base_offset = ConstantPool::header_size() * wordSize; 473 const int tags_offset = Array<u1>::base_offset_in_bytes(); 474 475 // get const type 476 __ add(Rtemp, Rtags, tags_offset); 477 #ifdef AARCH64 478 __ add(Rtemp, Rtemp, Rindex); 479 __ ldarb(RtagType, Rtemp); // TODO-AARCH64 figure out if barrier is needed here, or control dependency is enough 480 #else 481 __ ldrb(RtagType, Address(Rtemp, Rindex)); 482 volatile_barrier(MacroAssembler::LoadLoad, Rtemp); 483 #endif // AARCH64 484 485 // unresolved class - get the resolved class 486 __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass); 487 488 // unresolved class in error (resolution failed) - call into runtime 489 // so that the same error from first resolution attempt is thrown. 490 #ifdef AARCH64 491 __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint 492 __ cond_cmp(RtagType, Rtemp, ne); 493 #else 494 __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne); 495 #endif // AARCH64 496 497 // resolved class - need to call vm to get java mirror of the class 498 __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne); 499 500 __ b(fastCase, ne); 501 502 // slow case - call runtime 503 __ mov(R1, wide); 504 call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1); 505 __ push(atos); 506 __ b(Done); 507 508 // int, float, String 509 __ bind(fastCase); 510 #ifdef ASSERT 511 { Label L; 512 __ cmp(RtagType, JVM_CONSTANT_Integer); 513 __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne); 514 __ b(L, eq); 515 __ stop("unexpected tag type in ldc"); 516 __ bind(L); 517 } 518 #endif // ASSERT 519 // itos, ftos 520 __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord)); 521 __ ldr_u32(R0_tos, Address(Rtemp, base_offset)); 522 523 // floats and ints are placed on stack in the same way, so 524 // we can use push(itos) to transfer float value without VFP 525 __ push(itos); 526 __ bind(Done); 527 } 528 529 // Fast path for caching oop constants. 530 void TemplateTable::fast_aldc(bool wide) { 531 transition(vtos, atos); 532 int index_size = wide ? sizeof(u2) : sizeof(u1); 533 Label resolved; 534 535 // We are resolved if the resolved reference cache entry contains a 536 // non-null object (CallSite, etc.) 537 assert_different_registers(R0_tos, R2_tmp); 538 __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size); 539 __ load_resolved_reference_at_index(R0_tos, R2_tmp); 540 __ cbnz(R0_tos, resolved); 541 542 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); 543 544 // first time invocation - must resolve first 545 __ mov(R1, (int)bytecode()); 546 __ call_VM(R0_tos, entry, R1); 547 __ bind(resolved); 548 549 if (VerifyOops) { 550 __ verify_oop(R0_tos); 551 } 552 } 553 554 void TemplateTable::ldc2_w() { 555 transition(vtos, vtos); 556 const Register Rtags = R2_tmp; 557 const Register Rindex = R3_tmp; 558 const Register Rcpool = R4_tmp; 559 const Register Rbase = R5_tmp; 560 561 __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); 562 563 __ get_cpool_and_tags(Rcpool, Rtags); 564 const int base_offset = ConstantPool::header_size() * wordSize; 565 const int tags_offset = Array<u1>::base_offset_in_bytes(); 566 567 __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord)); 568 569 #ifdef __ABI_HARD__ 570 Label Long, exit; 571 // get type from tags 572 __ add(Rtemp, Rtags, tags_offset); 573 __ ldrb(Rtemp, Address(Rtemp, Rindex)); 574 __ cmp(Rtemp, JVM_CONSTANT_Double); 575 __ b(Long, ne); 576 __ ldr_double(D0_tos, Address(Rbase, base_offset)); 577 578 __ push(dtos); 579 __ b(exit); 580 __ bind(Long); 581 #endif 582 583 #ifdef AARCH64 584 __ ldr(R0_tos, Address(Rbase, base_offset)); 585 #else 586 __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize)); 587 __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize)); 588 #endif // AARCH64 589 __ push(ltos); 590 591 #ifdef __ABI_HARD__ 592 __ bind(exit); 593 #endif 594 } 595 596 597 void TemplateTable::locals_index(Register reg, int offset) { 598 __ ldrb(reg, at_bcp(offset)); 599 } 600 601 void TemplateTable::iload() { 602 iload_internal(); 603 } 604 605 void TemplateTable::nofast_iload() { 606 iload_internal(may_not_rewrite); 607 } 608 609 void TemplateTable::iload_internal(RewriteControl rc) { 610 transition(vtos, itos); 611 612 if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) { 613 Label rewrite, done; 614 const Register next_bytecode = R1_tmp; 615 const Register target_bytecode = R2_tmp; 616 617 // get next byte 618 __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload))); 619 // if _iload, wait to rewrite to iload2. We only want to rewrite the 620 // last two iloads in a pair. Comparing against fast_iload means that 621 // the next bytecode is neither an iload or a caload, and therefore 622 // an iload pair. 623 __ cmp(next_bytecode, Bytecodes::_iload); 624 __ b(done, eq); 625 626 __ cmp(next_bytecode, Bytecodes::_fast_iload); 627 __ mov(target_bytecode, Bytecodes::_fast_iload2); 628 __ b(rewrite, eq); 629 630 // if _caload, rewrite to fast_icaload 631 __ cmp(next_bytecode, Bytecodes::_caload); 632 __ mov(target_bytecode, Bytecodes::_fast_icaload); 633 __ b(rewrite, eq); 634 635 // rewrite so iload doesn't check again. 636 __ mov(target_bytecode, Bytecodes::_fast_iload); 637 638 // rewrite 639 // R2: fast bytecode 640 __ bind(rewrite); 641 patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false); 642 __ bind(done); 643 } 644 645 // Get the local value into tos 646 const Register Rlocal_index = R1_tmp; 647 locals_index(Rlocal_index); 648 Address local = load_iaddress(Rlocal_index, Rtemp); 649 __ ldr_s32(R0_tos, local); 650 } 651 652 653 void TemplateTable::fast_iload2() { 654 transition(vtos, itos); 655 const Register Rlocal_index = R1_tmp; 656 657 locals_index(Rlocal_index); 658 Address local = load_iaddress(Rlocal_index, Rtemp); 659 __ ldr_s32(R0_tos, local); 660 __ push(itos); 661 662 locals_index(Rlocal_index, 3); 663 local = load_iaddress(Rlocal_index, Rtemp); 664 __ ldr_s32(R0_tos, local); 665 } 666 667 void TemplateTable::fast_iload() { 668 transition(vtos, itos); 669 const Register Rlocal_index = R1_tmp; 670 671 locals_index(Rlocal_index); 672 Address local = load_iaddress(Rlocal_index, Rtemp); 673 __ ldr_s32(R0_tos, local); 674 } 675 676 677 void TemplateTable::lload() { 678 transition(vtos, ltos); 679 const Register Rlocal_index = R2_tmp; 680 681 locals_index(Rlocal_index); 682 load_category2_local(Rlocal_index, R3_tmp); 683 } 684 685 686 void TemplateTable::fload() { 687 transition(vtos, ftos); 688 const Register Rlocal_index = R2_tmp; 689 690 // Get the local value into tos 691 locals_index(Rlocal_index); 692 Address local = load_faddress(Rlocal_index, Rtemp); 693 #ifdef __SOFTFP__ 694 __ ldr(R0_tos, local); 695 #else 696 __ ldr_float(S0_tos, local); 697 #endif // __SOFTFP__ 698 } 699 700 701 void TemplateTable::dload() { 702 transition(vtos, dtos); 703 const Register Rlocal_index = R2_tmp; 704 705 locals_index(Rlocal_index); 706 707 #ifdef __SOFTFP__ 708 load_category2_local(Rlocal_index, R3_tmp); 709 #else 710 __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp)); 711 #endif // __SOFTFP__ 712 } 713 714 715 void TemplateTable::aload() { 716 transition(vtos, atos); 717 const Register Rlocal_index = R1_tmp; 718 719 locals_index(Rlocal_index); 720 Address local = load_aaddress(Rlocal_index, Rtemp); 721 __ ldr(R0_tos, local); 722 } 723 724 725 void TemplateTable::locals_index_wide(Register reg) { 726 assert_different_registers(reg, Rtemp); 727 __ ldrb(Rtemp, at_bcp(2)); 728 __ ldrb(reg, at_bcp(3)); 729 __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8)); 730 } 731 732 733 void TemplateTable::wide_iload() { 734 transition(vtos, itos); 735 const Register Rlocal_index = R2_tmp; 736 737 locals_index_wide(Rlocal_index); 738 Address local = load_iaddress(Rlocal_index, Rtemp); 739 __ ldr_s32(R0_tos, local); 740 } 741 742 743 void TemplateTable::wide_lload() { 744 transition(vtos, ltos); 745 const Register Rlocal_index = R2_tmp; 746 const Register Rlocal_base = R3_tmp; 747 748 locals_index_wide(Rlocal_index); 749 load_category2_local(Rlocal_index, R3_tmp); 750 } 751 752 753 void TemplateTable::wide_fload() { 754 transition(vtos, ftos); 755 const Register Rlocal_index = R2_tmp; 756 757 locals_index_wide(Rlocal_index); 758 Address local = load_faddress(Rlocal_index, Rtemp); 759 #ifdef __SOFTFP__ 760 __ ldr(R0_tos, local); 761 #else 762 __ ldr_float(S0_tos, local); 763 #endif // __SOFTFP__ 764 } 765 766 767 void TemplateTable::wide_dload() { 768 transition(vtos, dtos); 769 const Register Rlocal_index = R2_tmp; 770 771 locals_index_wide(Rlocal_index); 772 #ifdef __SOFTFP__ 773 load_category2_local(Rlocal_index, R3_tmp); 774 #else 775 __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp)); 776 #endif // __SOFTFP__ 777 } 778 779 780 void TemplateTable::wide_aload() { 781 transition(vtos, atos); 782 const Register Rlocal_index = R2_tmp; 783 784 locals_index_wide(Rlocal_index); 785 Address local = load_aaddress(Rlocal_index, Rtemp); 786 __ ldr(R0_tos, local); 787 } 788 789 void TemplateTable::index_check(Register array, Register index) { 790 // Pop ptr into array 791 __ pop_ptr(array); 792 index_check_without_pop(array, index); 793 } 794 795 void TemplateTable::index_check_without_pop(Register array, Register index) { 796 assert_different_registers(array, index, Rtemp); 797 // check array 798 __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes()); 799 // check index 800 __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes())); 801 __ cmp_32(index, Rtemp); 802 if (index != R4_ArrayIndexOutOfBounds_index) { 803 // convention with generate_ArrayIndexOutOfBounds_handler() 804 __ mov(R4_ArrayIndexOutOfBounds_index, index, hs); 805 } 806 __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs); 807 } 808 809 810 void TemplateTable::iaload() { 811 transition(itos, itos); 812 const Register Rarray = R1_tmp; 813 const Register Rindex = R0_tos; 814 815 index_check(Rarray, Rindex); 816 __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp)); 817 } 818 819 820 void TemplateTable::laload() { 821 transition(itos, ltos); 822 const Register Rarray = R1_tmp; 823 const Register Rindex = R0_tos; 824 825 index_check(Rarray, Rindex); 826 827 #ifdef AARCH64 828 __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp)); 829 #else 830 __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong)); 831 __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG)); 832 __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi)); 833 #endif // AARCH64 834 } 835 836 837 void TemplateTable::faload() { 838 transition(itos, ftos); 839 const Register Rarray = R1_tmp; 840 const Register Rindex = R0_tos; 841 842 index_check(Rarray, Rindex); 843 844 Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp); 845 #ifdef __SOFTFP__ 846 __ ldr(R0_tos, addr); 847 #else 848 __ ldr_float(S0_tos, addr); 849 #endif // __SOFTFP__ 850 } 851 852 853 void TemplateTable::daload() { 854 transition(itos, dtos); 855 const Register Rarray = R1_tmp; 856 const Register Rindex = R0_tos; 857 858 index_check(Rarray, Rindex); 859 860 #ifdef __SOFTFP__ 861 __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong)); 862 __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)); 863 __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi)); 864 #else 865 __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp)); 866 #endif // __SOFTFP__ 867 } 868 869 870 void TemplateTable::aaload() { 871 transition(itos, atos); 872 const Register Rarray = R1_tmp; 873 const Register Rindex = R0_tos; 874 875 index_check(Rarray, Rindex); 876 __ load_heap_oop(R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp)); 877 } 878 879 880 void TemplateTable::baload() { 881 transition(itos, itos); 882 const Register Rarray = R1_tmp; 883 const Register Rindex = R0_tos; 884 885 index_check(Rarray, Rindex); 886 __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp)); 887 } 888 889 890 void TemplateTable::caload() { 891 transition(itos, itos); 892 const Register Rarray = R1_tmp; 893 const Register Rindex = R0_tos; 894 895 index_check(Rarray, Rindex); 896 __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp)); 897 } 898 899 900 // iload followed by caload frequent pair 901 void TemplateTable::fast_icaload() { 902 transition(vtos, itos); 903 const Register Rlocal_index = R1_tmp; 904 const Register Rarray = R1_tmp; 905 const Register Rindex = R4_tmp; // index_check prefers index on R4 906 assert_different_registers(Rlocal_index, Rindex); 907 assert_different_registers(Rarray, Rindex); 908 909 // load index out of locals 910 locals_index(Rlocal_index); 911 Address local = load_iaddress(Rlocal_index, Rtemp); 912 __ ldr_s32(Rindex, local); 913 914 // get array element 915 index_check(Rarray, Rindex); 916 __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp)); 917 } 918 919 920 void TemplateTable::saload() { 921 transition(itos, itos); 922 const Register Rarray = R1_tmp; 923 const Register Rindex = R0_tos; 924 925 index_check(Rarray, Rindex); 926 __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp)); 927 } 928 929 930 void TemplateTable::iload(int n) { 931 transition(vtos, itos); 932 __ ldr_s32(R0_tos, iaddress(n)); 933 } 934 935 936 void TemplateTable::lload(int n) { 937 transition(vtos, ltos); 938 #ifdef AARCH64 939 __ ldr(R0_tos, laddress(n)); 940 #else 941 __ ldr(R0_tos_lo, laddress(n)); 942 __ ldr(R1_tos_hi, haddress(n)); 943 #endif // AARCH64 944 } 945 946 947 void TemplateTable::fload(int n) { 948 transition(vtos, ftos); 949 #ifdef __SOFTFP__ 950 __ ldr(R0_tos, faddress(n)); 951 #else 952 __ ldr_float(S0_tos, faddress(n)); 953 #endif // __SOFTFP__ 954 } 955 956 957 void TemplateTable::dload(int n) { 958 transition(vtos, dtos); 959 #ifdef __SOFTFP__ 960 __ ldr(R0_tos_lo, laddress(n)); 961 __ ldr(R1_tos_hi, haddress(n)); 962 #else 963 __ ldr_double(D0_tos, daddress(n)); 964 #endif // __SOFTFP__ 965 } 966 967 968 void TemplateTable::aload(int n) { 969 transition(vtos, atos); 970 __ ldr(R0_tos, aaddress(n)); 971 } 972 973 void TemplateTable::aload_0() { 974 aload_0_internal(); 975 } 976 977 void TemplateTable::nofast_aload_0() { 978 aload_0_internal(may_not_rewrite); 979 } 980 981 void TemplateTable::aload_0_internal(RewriteControl rc) { 982 transition(vtos, atos); 983 // According to bytecode histograms, the pairs: 984 // 985 // _aload_0, _fast_igetfield 986 // _aload_0, _fast_agetfield 987 // _aload_0, _fast_fgetfield 988 // 989 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0 990 // bytecode checks if the next bytecode is either _fast_igetfield, 991 // _fast_agetfield or _fast_fgetfield and then rewrites the 992 // current bytecode into a pair bytecode; otherwise it rewrites the current 993 // bytecode into _fast_aload_0 that doesn't do the pair check anymore. 994 // 995 // Note: If the next bytecode is _getfield, the rewrite must be delayed, 996 // otherwise we may miss an opportunity for a pair. 997 // 998 // Also rewrite frequent pairs 999 // aload_0, aload_1 1000 // aload_0, iload_1 1001 // These bytecodes with a small amount of code are most profitable to rewrite 1002 if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) { 1003 Label rewrite, done; 1004 const Register next_bytecode = R1_tmp; 1005 const Register target_bytecode = R2_tmp; 1006 1007 // get next byte 1008 __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0))); 1009 1010 // if _getfield then wait with rewrite 1011 __ cmp(next_bytecode, Bytecodes::_getfield); 1012 __ b(done, eq); 1013 1014 // if _igetfield then rewrite to _fast_iaccess_0 1015 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); 1016 __ cmp(next_bytecode, Bytecodes::_fast_igetfield); 1017 __ mov(target_bytecode, Bytecodes::_fast_iaccess_0); 1018 __ b(rewrite, eq); 1019 1020 // if _agetfield then rewrite to _fast_aaccess_0 1021 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); 1022 __ cmp(next_bytecode, Bytecodes::_fast_agetfield); 1023 __ mov(target_bytecode, Bytecodes::_fast_aaccess_0); 1024 __ b(rewrite, eq); 1025 1026 // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0 1027 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); 1028 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition"); 1029 1030 __ cmp(next_bytecode, Bytecodes::_fast_fgetfield); 1031 #ifdef AARCH64 1032 __ mov(Rtemp, Bytecodes::_fast_faccess_0); 1033 __ mov(target_bytecode, Bytecodes::_fast_aload_0); 1034 __ mov(target_bytecode, Rtemp, eq); 1035 #else 1036 __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq); 1037 __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne); 1038 #endif // AARCH64 1039 1040 // rewrite 1041 __ bind(rewrite); 1042 patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false); 1043 1044 __ bind(done); 1045 } 1046 1047 aload(0); 1048 } 1049 1050 void TemplateTable::istore() { 1051 transition(itos, vtos); 1052 const Register Rlocal_index = R2_tmp; 1053 1054 locals_index(Rlocal_index); 1055 Address local = load_iaddress(Rlocal_index, Rtemp); 1056 __ str_32(R0_tos, local); 1057 } 1058 1059 1060 void TemplateTable::lstore() { 1061 transition(ltos, vtos); 1062 const Register Rlocal_index = R2_tmp; 1063 1064 locals_index(Rlocal_index); 1065 store_category2_local(Rlocal_index, R3_tmp); 1066 } 1067 1068 1069 void TemplateTable::fstore() { 1070 transition(ftos, vtos); 1071 const Register Rlocal_index = R2_tmp; 1072 1073 locals_index(Rlocal_index); 1074 Address local = load_faddress(Rlocal_index, Rtemp); 1075 #ifdef __SOFTFP__ 1076 __ str(R0_tos, local); 1077 #else 1078 __ str_float(S0_tos, local); 1079 #endif // __SOFTFP__ 1080 } 1081 1082 1083 void TemplateTable::dstore() { 1084 transition(dtos, vtos); 1085 const Register Rlocal_index = R2_tmp; 1086 1087 locals_index(Rlocal_index); 1088 1089 #ifdef __SOFTFP__ 1090 store_category2_local(Rlocal_index, R3_tmp); 1091 #else 1092 __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp)); 1093 #endif // __SOFTFP__ 1094 } 1095 1096 1097 void TemplateTable::astore() { 1098 transition(vtos, vtos); 1099 const Register Rlocal_index = R1_tmp; 1100 1101 __ pop_ptr(R0_tos); 1102 locals_index(Rlocal_index); 1103 Address local = load_aaddress(Rlocal_index, Rtemp); 1104 __ str(R0_tos, local); 1105 } 1106 1107 1108 void TemplateTable::wide_istore() { 1109 transition(vtos, vtos); 1110 const Register Rlocal_index = R2_tmp; 1111 1112 __ pop_i(R0_tos); 1113 locals_index_wide(Rlocal_index); 1114 Address local = load_iaddress(Rlocal_index, Rtemp); 1115 __ str_32(R0_tos, local); 1116 } 1117 1118 1119 void TemplateTable::wide_lstore() { 1120 transition(vtos, vtos); 1121 const Register Rlocal_index = R2_tmp; 1122 const Register Rlocal_base = R3_tmp; 1123 1124 #ifdef AARCH64 1125 __ pop_l(R0_tos); 1126 #else 1127 __ pop_l(R0_tos_lo, R1_tos_hi); 1128 #endif // AARCH64 1129 1130 locals_index_wide(Rlocal_index); 1131 store_category2_local(Rlocal_index, R3_tmp); 1132 } 1133 1134 1135 void TemplateTable::wide_fstore() { 1136 wide_istore(); 1137 } 1138 1139 1140 void TemplateTable::wide_dstore() { 1141 wide_lstore(); 1142 } 1143 1144 1145 void TemplateTable::wide_astore() { 1146 transition(vtos, vtos); 1147 const Register Rlocal_index = R2_tmp; 1148 1149 __ pop_ptr(R0_tos); 1150 locals_index_wide(Rlocal_index); 1151 Address local = load_aaddress(Rlocal_index, Rtemp); 1152 __ str(R0_tos, local); 1153 } 1154 1155 1156 void TemplateTable::iastore() { 1157 transition(itos, vtos); 1158 const Register Rindex = R4_tmp; // index_check prefers index in R4 1159 const Register Rarray = R3_tmp; 1160 // R0_tos: value 1161 1162 __ pop_i(Rindex); 1163 index_check(Rarray, Rindex); 1164 __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp)); 1165 } 1166 1167 1168 void TemplateTable::lastore() { 1169 transition(ltos, vtos); 1170 const Register Rindex = R4_tmp; // index_check prefers index in R4 1171 const Register Rarray = R3_tmp; 1172 // R0_tos_lo:R1_tos_hi: value 1173 1174 __ pop_i(Rindex); 1175 index_check(Rarray, Rindex); 1176 1177 #ifdef AARCH64 1178 __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp)); 1179 #else 1180 __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong)); 1181 __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG)); 1182 __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi)); 1183 #endif // AARCH64 1184 } 1185 1186 1187 void TemplateTable::fastore() { 1188 transition(ftos, vtos); 1189 const Register Rindex = R4_tmp; // index_check prefers index in R4 1190 const Register Rarray = R3_tmp; 1191 // S0_tos/R0_tos: value 1192 1193 __ pop_i(Rindex); 1194 index_check(Rarray, Rindex); 1195 Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp); 1196 1197 #ifdef __SOFTFP__ 1198 __ str(R0_tos, addr); 1199 #else 1200 __ str_float(S0_tos, addr); 1201 #endif // __SOFTFP__ 1202 } 1203 1204 1205 void TemplateTable::dastore() { 1206 transition(dtos, vtos); 1207 const Register Rindex = R4_tmp; // index_check prefers index in R4 1208 const Register Rarray = R3_tmp; 1209 // D0_tos / R0_tos_lo:R1_to_hi: value 1210 1211 __ pop_i(Rindex); 1212 index_check(Rarray, Rindex); 1213 1214 #ifdef __SOFTFP__ 1215 __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong)); 1216 __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)); 1217 __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi)); 1218 #else 1219 __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp)); 1220 #endif // __SOFTFP__ 1221 } 1222 1223 1224 void TemplateTable::aastore() { 1225 transition(vtos, vtos); 1226 Label is_null, throw_array_store, done; 1227 1228 const Register Raddr_1 = R1_tmp; 1229 const Register Rvalue_2 = R2_tmp; 1230 const Register Rarray_3 = R3_tmp; 1231 const Register Rindex_4 = R4_tmp; // preferred by index_check_without_pop() 1232 const Register Rsub_5 = R5_tmp; 1233 const Register Rsuper_LR = LR_tmp; 1234 1235 // stack: ..., array, index, value 1236 __ ldr(Rvalue_2, at_tos()); // Value 1237 __ ldr_s32(Rindex_4, at_tos_p1()); // Index 1238 __ ldr(Rarray_3, at_tos_p2()); // Array 1239 1240 index_check_without_pop(Rarray_3, Rindex_4); 1241 1242 // Compute the array base 1243 __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); 1244 1245 // do array store check - check for NULL value first 1246 __ cbz(Rvalue_2, is_null); 1247 1248 // Load subklass 1249 __ load_klass(Rsub_5, Rvalue_2); 1250 // Load superklass 1251 __ load_klass(Rtemp, Rarray_3); 1252 __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset())); 1253 1254 __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp); 1255 // Come here on success 1256 1257 // Store value 1258 __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop)); 1259 1260 // Now store using the appropriate barrier 1261 do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, false); 1262 __ b(done); 1263 1264 __ bind(throw_array_store); 1265 1266 // Come here on failure of subtype check 1267 __ profile_typecheck_failed(R0_tmp); 1268 1269 // object is at TOS 1270 __ b(Interpreter::_throw_ArrayStoreException_entry); 1271 1272 // Have a NULL in Rvalue_2, store NULL at array[index]. 1273 __ bind(is_null); 1274 __ profile_null_seen(R0_tmp); 1275 1276 // Store a NULL 1277 do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, true); 1278 1279 // Pop stack arguments 1280 __ bind(done); 1281 __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize); 1282 } 1283 1284 1285 void TemplateTable::bastore() { 1286 transition(itos, vtos); 1287 const Register Rindex = R4_tmp; // index_check prefers index in R4 1288 const Register Rarray = R3_tmp; 1289 // R0_tos: value 1290 1291 __ pop_i(Rindex); 1292 index_check(Rarray, Rindex); 1293 1294 // Need to check whether array is boolean or byte 1295 // since both types share the bastore bytecode. 1296 __ load_klass(Rtemp, Rarray); 1297 __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset())); 1298 Label L_skip; 1299 __ tst(Rtemp, Klass::layout_helper_boolean_diffbit()); 1300 __ b(L_skip, eq); 1301 __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1 1302 __ bind(L_skip); 1303 __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp)); 1304 } 1305 1306 1307 void TemplateTable::castore() { 1308 transition(itos, vtos); 1309 const Register Rindex = R4_tmp; // index_check prefers index in R4 1310 const Register Rarray = R3_tmp; 1311 // R0_tos: value 1312 1313 __ pop_i(Rindex); 1314 index_check(Rarray, Rindex); 1315 1316 __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp)); 1317 } 1318 1319 1320 void TemplateTable::sastore() { 1321 assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) == 1322 arrayOopDesc::base_offset_in_bytes(T_SHORT), 1323 "base offsets for char and short should be equal"); 1324 castore(); 1325 } 1326 1327 1328 void TemplateTable::istore(int n) { 1329 transition(itos, vtos); 1330 __ str_32(R0_tos, iaddress(n)); 1331 } 1332 1333 1334 void TemplateTable::lstore(int n) { 1335 transition(ltos, vtos); 1336 #ifdef AARCH64 1337 __ str(R0_tos, laddress(n)); 1338 #else 1339 __ str(R0_tos_lo, laddress(n)); 1340 __ str(R1_tos_hi, haddress(n)); 1341 #endif // AARCH64 1342 } 1343 1344 1345 void TemplateTable::fstore(int n) { 1346 transition(ftos, vtos); 1347 #ifdef __SOFTFP__ 1348 __ str(R0_tos, faddress(n)); 1349 #else 1350 __ str_float(S0_tos, faddress(n)); 1351 #endif // __SOFTFP__ 1352 } 1353 1354 1355 void TemplateTable::dstore(int n) { 1356 transition(dtos, vtos); 1357 #ifdef __SOFTFP__ 1358 __ str(R0_tos_lo, laddress(n)); 1359 __ str(R1_tos_hi, haddress(n)); 1360 #else 1361 __ str_double(D0_tos, daddress(n)); 1362 #endif // __SOFTFP__ 1363 } 1364 1365 1366 void TemplateTable::astore(int n) { 1367 transition(vtos, vtos); 1368 __ pop_ptr(R0_tos); 1369 __ str(R0_tos, aaddress(n)); 1370 } 1371 1372 1373 void TemplateTable::pop() { 1374 transition(vtos, vtos); 1375 __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize); 1376 } 1377 1378 1379 void TemplateTable::pop2() { 1380 transition(vtos, vtos); 1381 __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize); 1382 } 1383 1384 1385 void TemplateTable::dup() { 1386 transition(vtos, vtos); 1387 // stack: ..., a 1388 __ load_ptr(0, R0_tmp); 1389 __ push_ptr(R0_tmp); 1390 // stack: ..., a, a 1391 } 1392 1393 1394 void TemplateTable::dup_x1() { 1395 transition(vtos, vtos); 1396 // stack: ..., a, b 1397 __ load_ptr(0, R0_tmp); // load b 1398 __ load_ptr(1, R2_tmp); // load a 1399 __ store_ptr(1, R0_tmp); // store b 1400 __ store_ptr(0, R2_tmp); // store a 1401 __ push_ptr(R0_tmp); // push b 1402 // stack: ..., b, a, b 1403 } 1404 1405 1406 void TemplateTable::dup_x2() { 1407 transition(vtos, vtos); 1408 // stack: ..., a, b, c 1409 __ load_ptr(0, R0_tmp); // load c 1410 __ load_ptr(1, R2_tmp); // load b 1411 __ load_ptr(2, R4_tmp); // load a 1412 1413 __ push_ptr(R0_tmp); // push c 1414 1415 // stack: ..., a, b, c, c 1416 __ store_ptr(1, R2_tmp); // store b 1417 __ store_ptr(2, R4_tmp); // store a 1418 __ store_ptr(3, R0_tmp); // store c 1419 // stack: ..., c, a, b, c 1420 } 1421 1422 1423 void TemplateTable::dup2() { 1424 transition(vtos, vtos); 1425 // stack: ..., a, b 1426 __ load_ptr(1, R0_tmp); // load a 1427 __ push_ptr(R0_tmp); // push a 1428 __ load_ptr(1, R0_tmp); // load b 1429 __ push_ptr(R0_tmp); // push b 1430 // stack: ..., a, b, a, b 1431 } 1432 1433 1434 void TemplateTable::dup2_x1() { 1435 transition(vtos, vtos); 1436 1437 // stack: ..., a, b, c 1438 __ load_ptr(0, R4_tmp); // load c 1439 __ load_ptr(1, R2_tmp); // load b 1440 __ load_ptr(2, R0_tmp); // load a 1441 1442 __ push_ptr(R2_tmp); // push b 1443 __ push_ptr(R4_tmp); // push c 1444 1445 // stack: ..., a, b, c, b, c 1446 1447 __ store_ptr(2, R0_tmp); // store a 1448 __ store_ptr(3, R4_tmp); // store c 1449 __ store_ptr(4, R2_tmp); // store b 1450 1451 // stack: ..., b, c, a, b, c 1452 } 1453 1454 1455 void TemplateTable::dup2_x2() { 1456 transition(vtos, vtos); 1457 // stack: ..., a, b, c, d 1458 __ load_ptr(0, R0_tmp); // load d 1459 __ load_ptr(1, R2_tmp); // load c 1460 __ push_ptr(R2_tmp); // push c 1461 __ push_ptr(R0_tmp); // push d 1462 // stack: ..., a, b, c, d, c, d 1463 __ load_ptr(4, R4_tmp); // load b 1464 __ store_ptr(4, R0_tmp); // store d in b 1465 __ store_ptr(2, R4_tmp); // store b in d 1466 // stack: ..., a, d, c, b, c, d 1467 __ load_ptr(5, R4_tmp); // load a 1468 __ store_ptr(5, R2_tmp); // store c in a 1469 __ store_ptr(3, R4_tmp); // store a in c 1470 // stack: ..., c, d, a, b, c, d 1471 } 1472 1473 1474 void TemplateTable::swap() { 1475 transition(vtos, vtos); 1476 // stack: ..., a, b 1477 __ load_ptr(1, R0_tmp); // load a 1478 __ load_ptr(0, R2_tmp); // load b 1479 __ store_ptr(0, R0_tmp); // store a in b 1480 __ store_ptr(1, R2_tmp); // store b in a 1481 // stack: ..., b, a 1482 } 1483 1484 1485 void TemplateTable::iop2(Operation op) { 1486 transition(itos, itos); 1487 const Register arg1 = R1_tmp; 1488 const Register arg2 = R0_tos; 1489 1490 __ pop_i(arg1); 1491 switch (op) { 1492 case add : __ add_32 (R0_tos, arg1, arg2); break; 1493 case sub : __ sub_32 (R0_tos, arg1, arg2); break; 1494 case mul : __ mul_32 (R0_tos, arg1, arg2); break; 1495 case _and : __ and_32 (R0_tos, arg1, arg2); break; 1496 case _or : __ orr_32 (R0_tos, arg1, arg2); break; 1497 case _xor : __ eor_32 (R0_tos, arg1, arg2); break; 1498 #ifdef AARCH64 1499 case shl : __ lslv_w (R0_tos, arg1, arg2); break; 1500 case shr : __ asrv_w (R0_tos, arg1, arg2); break; 1501 case ushr : __ lsrv_w (R0_tos, arg1, arg2); break; 1502 #else 1503 case shl : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break; 1504 case shr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break; 1505 case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break; 1506 #endif // AARCH64 1507 default : ShouldNotReachHere(); 1508 } 1509 } 1510 1511 1512 void TemplateTable::lop2(Operation op) { 1513 transition(ltos, ltos); 1514 #ifdef AARCH64 1515 const Register arg1 = R1_tmp; 1516 const Register arg2 = R0_tos; 1517 1518 __ pop_l(arg1); 1519 switch (op) { 1520 case add : __ add (R0_tos, arg1, arg2); break; 1521 case sub : __ sub (R0_tos, arg1, arg2); break; 1522 case _and : __ andr(R0_tos, arg1, arg2); break; 1523 case _or : __ orr (R0_tos, arg1, arg2); break; 1524 case _xor : __ eor (R0_tos, arg1, arg2); break; 1525 default : ShouldNotReachHere(); 1526 } 1527 #else 1528 const Register arg1_lo = R2_tmp; 1529 const Register arg1_hi = R3_tmp; 1530 const Register arg2_lo = R0_tos_lo; 1531 const Register arg2_hi = R1_tos_hi; 1532 1533 __ pop_l(arg1_lo, arg1_hi); 1534 switch (op) { 1535 case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break; 1536 case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break; 1537 case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break; 1538 case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break; 1539 case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break; 1540 default : ShouldNotReachHere(); 1541 } 1542 #endif // AARCH64 1543 } 1544 1545 1546 void TemplateTable::idiv() { 1547 transition(itos, itos); 1548 #ifdef AARCH64 1549 const Register divisor = R0_tos; 1550 const Register dividend = R1_tmp; 1551 1552 __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry); 1553 __ pop_i(dividend); 1554 __ sdiv_w(R0_tos, dividend, divisor); 1555 #else 1556 __ mov(R2, R0_tos); 1557 __ pop_i(R0); 1558 // R0 - dividend 1559 // R2 - divisor 1560 __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none); 1561 // R1 - result 1562 __ mov(R0_tos, R1); 1563 #endif // AARCH64 1564 } 1565 1566 1567 void TemplateTable::irem() { 1568 transition(itos, itos); 1569 #ifdef AARCH64 1570 const Register divisor = R0_tos; 1571 const Register dividend = R1_tmp; 1572 const Register quotient = R2_tmp; 1573 1574 __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry); 1575 __ pop_i(dividend); 1576 __ sdiv_w(quotient, dividend, divisor); 1577 __ msub_w(R0_tos, divisor, quotient, dividend); 1578 #else 1579 __ mov(R2, R0_tos); 1580 __ pop_i(R0); 1581 // R0 - dividend 1582 // R2 - divisor 1583 __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none); 1584 // R0 - remainder 1585 #endif // AARCH64 1586 } 1587 1588 1589 void TemplateTable::lmul() { 1590 transition(ltos, ltos); 1591 #ifdef AARCH64 1592 const Register arg1 = R0_tos; 1593 const Register arg2 = R1_tmp; 1594 1595 __ pop_l(arg2); 1596 __ mul(R0_tos, arg1, arg2); 1597 #else 1598 const Register arg1_lo = R0_tos_lo; 1599 const Register arg1_hi = R1_tos_hi; 1600 const Register arg2_lo = R2_tmp; 1601 const Register arg2_hi = R3_tmp; 1602 1603 __ pop_l(arg2_lo, arg2_hi); 1604 1605 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi); 1606 #endif // AARCH64 1607 } 1608 1609 1610 void TemplateTable::ldiv() { 1611 transition(ltos, ltos); 1612 #ifdef AARCH64 1613 const Register divisor = R0_tos; 1614 const Register dividend = R1_tmp; 1615 1616 __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry); 1617 __ pop_l(dividend); 1618 __ sdiv(R0_tos, dividend, divisor); 1619 #else 1620 const Register x_lo = R2_tmp; 1621 const Register x_hi = R3_tmp; 1622 const Register y_lo = R0_tos_lo; 1623 const Register y_hi = R1_tos_hi; 1624 1625 __ pop_l(x_lo, x_hi); 1626 1627 // check if y = 0 1628 __ orrs(Rtemp, y_lo, y_hi); 1629 __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq); 1630 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi); 1631 #endif // AARCH64 1632 } 1633 1634 1635 void TemplateTable::lrem() { 1636 transition(ltos, ltos); 1637 #ifdef AARCH64 1638 const Register divisor = R0_tos; 1639 const Register dividend = R1_tmp; 1640 const Register quotient = R2_tmp; 1641 1642 __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry); 1643 __ pop_l(dividend); 1644 __ sdiv(quotient, dividend, divisor); 1645 __ msub(R0_tos, divisor, quotient, dividend); 1646 #else 1647 const Register x_lo = R2_tmp; 1648 const Register x_hi = R3_tmp; 1649 const Register y_lo = R0_tos_lo; 1650 const Register y_hi = R1_tos_hi; 1651 1652 __ pop_l(x_lo, x_hi); 1653 1654 // check if y = 0 1655 __ orrs(Rtemp, y_lo, y_hi); 1656 __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq); 1657 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi); 1658 #endif // AARCH64 1659 } 1660 1661 1662 void TemplateTable::lshl() { 1663 transition(itos, ltos); 1664 #ifdef AARCH64 1665 const Register val = R1_tmp; 1666 const Register shift_cnt = R0_tos; 1667 __ pop_l(val); 1668 __ lslv(R0_tos, val, shift_cnt); 1669 #else 1670 const Register shift_cnt = R4_tmp; 1671 const Register val_lo = R2_tmp; 1672 const Register val_hi = R3_tmp; 1673 1674 __ pop_l(val_lo, val_hi); 1675 __ andr(shift_cnt, R0_tos, 63); 1676 __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt); 1677 #endif // AARCH64 1678 } 1679 1680 1681 void TemplateTable::lshr() { 1682 transition(itos, ltos); 1683 #ifdef AARCH64 1684 const Register val = R1_tmp; 1685 const Register shift_cnt = R0_tos; 1686 __ pop_l(val); 1687 __ asrv(R0_tos, val, shift_cnt); 1688 #else 1689 const Register shift_cnt = R4_tmp; 1690 const Register val_lo = R2_tmp; 1691 const Register val_hi = R3_tmp; 1692 1693 __ pop_l(val_lo, val_hi); 1694 __ andr(shift_cnt, R0_tos, 63); 1695 __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt); 1696 #endif // AARCH64 1697 } 1698 1699 1700 void TemplateTable::lushr() { 1701 transition(itos, ltos); 1702 #ifdef AARCH64 1703 const Register val = R1_tmp; 1704 const Register shift_cnt = R0_tos; 1705 __ pop_l(val); 1706 __ lsrv(R0_tos, val, shift_cnt); 1707 #else 1708 const Register shift_cnt = R4_tmp; 1709 const Register val_lo = R2_tmp; 1710 const Register val_hi = R3_tmp; 1711 1712 __ pop_l(val_lo, val_hi); 1713 __ andr(shift_cnt, R0_tos, 63); 1714 __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt); 1715 #endif // AARCH64 1716 } 1717 1718 1719 void TemplateTable::fop2(Operation op) { 1720 transition(ftos, ftos); 1721 #ifdef __SOFTFP__ 1722 __ mov(R1, R0_tos); 1723 __ pop_i(R0); 1724 switch (op) { 1725 case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break; 1726 case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break; 1727 case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break; 1728 case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break; 1729 case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break; 1730 default : ShouldNotReachHere(); 1731 } 1732 #else 1733 const FloatRegister arg1 = S1_tmp; 1734 const FloatRegister arg2 = S0_tos; 1735 1736 switch (op) { 1737 case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break; 1738 case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break; 1739 case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break; 1740 case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break; 1741 case rem: 1742 #ifndef __ABI_HARD__ 1743 __ pop_f(arg1); 1744 __ fmrs(R0, arg1); 1745 __ fmrs(R1, arg2); 1746 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); 1747 __ fmsr(S0_tos, R0); 1748 #else 1749 __ mov_float(S1_reg, arg2); 1750 __ pop_f(S0); 1751 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem)); 1752 #endif // !__ABI_HARD__ 1753 break; 1754 default : ShouldNotReachHere(); 1755 } 1756 #endif // __SOFTFP__ 1757 } 1758 1759 1760 void TemplateTable::dop2(Operation op) { 1761 transition(dtos, dtos); 1762 #ifdef __SOFTFP__ 1763 __ mov(R2, R0_tos_lo); 1764 __ mov(R3, R1_tos_hi); 1765 __ pop_l(R0, R1); 1766 switch (op) { 1767 // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269. 1768 case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break; 1769 case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break; 1770 case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break; 1771 case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break; 1772 case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break; 1773 default : ShouldNotReachHere(); 1774 } 1775 #else 1776 const FloatRegister arg1 = D1_tmp; 1777 const FloatRegister arg2 = D0_tos; 1778 1779 switch (op) { 1780 case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break; 1781 case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break; 1782 case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break; 1783 case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break; 1784 case rem: 1785 #ifndef __ABI_HARD__ 1786 __ pop_d(arg1); 1787 __ fmrrd(R0, R1, arg1); 1788 __ fmrrd(R2, R3, arg2); 1789 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); 1790 __ fmdrr(D0_tos, R0, R1); 1791 #else 1792 __ mov_double(D1, arg2); 1793 __ pop_d(D0); 1794 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem)); 1795 #endif // !__ABI_HARD__ 1796 break; 1797 default : ShouldNotReachHere(); 1798 } 1799 #endif // __SOFTFP__ 1800 } 1801 1802 1803 void TemplateTable::ineg() { 1804 transition(itos, itos); 1805 __ neg_32(R0_tos, R0_tos); 1806 } 1807 1808 1809 void TemplateTable::lneg() { 1810 transition(ltos, ltos); 1811 #ifdef AARCH64 1812 __ neg(R0_tos, R0_tos); 1813 #else 1814 __ rsbs(R0_tos_lo, R0_tos_lo, 0); 1815 __ rsc (R1_tos_hi, R1_tos_hi, 0); 1816 #endif // AARCH64 1817 } 1818 1819 1820 void TemplateTable::fneg() { 1821 transition(ftos, ftos); 1822 #ifdef __SOFTFP__ 1823 // Invert sign bit 1824 const int sign_mask = 0x80000000; 1825 __ eor(R0_tos, R0_tos, sign_mask); 1826 #else 1827 __ neg_float(S0_tos, S0_tos); 1828 #endif // __SOFTFP__ 1829 } 1830 1831 1832 void TemplateTable::dneg() { 1833 transition(dtos, dtos); 1834 #ifdef __SOFTFP__ 1835 // Invert sign bit in the high part of the double 1836 const int sign_mask_hi = 0x80000000; 1837 __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi); 1838 #else 1839 __ neg_double(D0_tos, D0_tos); 1840 #endif // __SOFTFP__ 1841 } 1842 1843 1844 void TemplateTable::iinc() { 1845 transition(vtos, vtos); 1846 const Register Rconst = R2_tmp; 1847 const Register Rlocal_index = R1_tmp; 1848 const Register Rval = R0_tmp; 1849 1850 __ ldrsb(Rconst, at_bcp(2)); 1851 locals_index(Rlocal_index); 1852 Address local = load_iaddress(Rlocal_index, Rtemp); 1853 __ ldr_s32(Rval, local); 1854 __ add(Rval, Rval, Rconst); 1855 __ str_32(Rval, local); 1856 } 1857 1858 1859 void TemplateTable::wide_iinc() { 1860 transition(vtos, vtos); 1861 const Register Rconst = R2_tmp; 1862 const Register Rlocal_index = R1_tmp; 1863 const Register Rval = R0_tmp; 1864 1865 // get constant in Rconst 1866 __ ldrsb(R2_tmp, at_bcp(4)); 1867 __ ldrb(R3_tmp, at_bcp(5)); 1868 __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8)); 1869 1870 locals_index_wide(Rlocal_index); 1871 Address local = load_iaddress(Rlocal_index, Rtemp); 1872 __ ldr_s32(Rval, local); 1873 __ add(Rval, Rval, Rconst); 1874 __ str_32(Rval, local); 1875 } 1876 1877 1878 void TemplateTable::convert() { 1879 // Checking 1880 #ifdef ASSERT 1881 { TosState tos_in = ilgl; 1882 TosState tos_out = ilgl; 1883 switch (bytecode()) { 1884 case Bytecodes::_i2l: // fall through 1885 case Bytecodes::_i2f: // fall through 1886 case Bytecodes::_i2d: // fall through 1887 case Bytecodes::_i2b: // fall through 1888 case Bytecodes::_i2c: // fall through 1889 case Bytecodes::_i2s: tos_in = itos; break; 1890 case Bytecodes::_l2i: // fall through 1891 case Bytecodes::_l2f: // fall through 1892 case Bytecodes::_l2d: tos_in = ltos; break; 1893 case Bytecodes::_f2i: // fall through 1894 case Bytecodes::_f2l: // fall through 1895 case Bytecodes::_f2d: tos_in = ftos; break; 1896 case Bytecodes::_d2i: // fall through 1897 case Bytecodes::_d2l: // fall through 1898 case Bytecodes::_d2f: tos_in = dtos; break; 1899 default : ShouldNotReachHere(); 1900 } 1901 switch (bytecode()) { 1902 case Bytecodes::_l2i: // fall through 1903 case Bytecodes::_f2i: // fall through 1904 case Bytecodes::_d2i: // fall through 1905 case Bytecodes::_i2b: // fall through 1906 case Bytecodes::_i2c: // fall through 1907 case Bytecodes::_i2s: tos_out = itos; break; 1908 case Bytecodes::_i2l: // fall through 1909 case Bytecodes::_f2l: // fall through 1910 case Bytecodes::_d2l: tos_out = ltos; break; 1911 case Bytecodes::_i2f: // fall through 1912 case Bytecodes::_l2f: // fall through 1913 case Bytecodes::_d2f: tos_out = ftos; break; 1914 case Bytecodes::_i2d: // fall through 1915 case Bytecodes::_l2d: // fall through 1916 case Bytecodes::_f2d: tos_out = dtos; break; 1917 default : ShouldNotReachHere(); 1918 } 1919 transition(tos_in, tos_out); 1920 } 1921 #endif // ASSERT 1922 1923 // Conversion 1924 switch (bytecode()) { 1925 case Bytecodes::_i2l: 1926 #ifdef AARCH64 1927 __ sign_extend(R0_tos, R0_tos, 32); 1928 #else 1929 __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1)); 1930 #endif // AARCH64 1931 break; 1932 1933 case Bytecodes::_i2f: 1934 #ifdef AARCH64 1935 __ scvtf_sw(S0_tos, R0_tos); 1936 #else 1937 #ifdef __SOFTFP__ 1938 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos); 1939 #else 1940 __ fmsr(S0_tmp, R0_tos); 1941 __ fsitos(S0_tos, S0_tmp); 1942 #endif // __SOFTFP__ 1943 #endif // AARCH64 1944 break; 1945 1946 case Bytecodes::_i2d: 1947 #ifdef AARCH64 1948 __ scvtf_dw(D0_tos, R0_tos); 1949 #else 1950 #ifdef __SOFTFP__ 1951 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos); 1952 #else 1953 __ fmsr(S0_tmp, R0_tos); 1954 __ fsitod(D0_tos, S0_tmp); 1955 #endif // __SOFTFP__ 1956 #endif // AARCH64 1957 break; 1958 1959 case Bytecodes::_i2b: 1960 __ sign_extend(R0_tos, R0_tos, 8); 1961 break; 1962 1963 case Bytecodes::_i2c: 1964 __ zero_extend(R0_tos, R0_tos, 16); 1965 break; 1966 1967 case Bytecodes::_i2s: 1968 __ sign_extend(R0_tos, R0_tos, 16); 1969 break; 1970 1971 case Bytecodes::_l2i: 1972 /* nothing to do */ 1973 break; 1974 1975 case Bytecodes::_l2f: 1976 #ifdef AARCH64 1977 __ scvtf_sx(S0_tos, R0_tos); 1978 #else 1979 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi); 1980 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__) 1981 __ fmsr(S0_tos, R0); 1982 #endif // !__SOFTFP__ && !__ABI_HARD__ 1983 #endif // AARCH64 1984 break; 1985 1986 case Bytecodes::_l2d: 1987 #ifdef AARCH64 1988 __ scvtf_dx(D0_tos, R0_tos); 1989 #else 1990 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi); 1991 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__) 1992 __ fmdrr(D0_tos, R0, R1); 1993 #endif // !__SOFTFP__ && !__ABI_HARD__ 1994 #endif // AARCH64 1995 break; 1996 1997 case Bytecodes::_f2i: 1998 #ifdef AARCH64 1999 __ fcvtzs_ws(R0_tos, S0_tos); 2000 #else 2001 #ifndef __SOFTFP__ 2002 __ ftosizs(S0_tos, S0_tos); 2003 __ fmrs(R0_tos, S0_tos); 2004 #else 2005 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos); 2006 #endif // !__SOFTFP__ 2007 #endif // AARCH64 2008 break; 2009 2010 case Bytecodes::_f2l: 2011 #ifdef AARCH64 2012 __ fcvtzs_xs(R0_tos, S0_tos); 2013 #else 2014 #ifndef __SOFTFP__ 2015 __ fmrs(R0_tos, S0_tos); 2016 #endif // !__SOFTFP__ 2017 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos); 2018 #endif // AARCH64 2019 break; 2020 2021 case Bytecodes::_f2d: 2022 #ifdef __SOFTFP__ 2023 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos); 2024 #else 2025 __ convert_f2d(D0_tos, S0_tos); 2026 #endif // __SOFTFP__ 2027 break; 2028 2029 case Bytecodes::_d2i: 2030 #ifdef AARCH64 2031 __ fcvtzs_wd(R0_tos, D0_tos); 2032 #else 2033 #ifndef __SOFTFP__ 2034 __ ftosizd(Stemp, D0); 2035 __ fmrs(R0, Stemp); 2036 #else 2037 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi); 2038 #endif // !__SOFTFP__ 2039 #endif // AARCH64 2040 break; 2041 2042 case Bytecodes::_d2l: 2043 #ifdef AARCH64 2044 __ fcvtzs_xd(R0_tos, D0_tos); 2045 #else 2046 #ifndef __SOFTFP__ 2047 __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos); 2048 #endif // !__SOFTFP__ 2049 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi); 2050 #endif // AARCH64 2051 break; 2052 2053 case Bytecodes::_d2f: 2054 #ifdef __SOFTFP__ 2055 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi); 2056 #else 2057 __ convert_d2f(S0_tos, D0_tos); 2058 #endif // __SOFTFP__ 2059 break; 2060 2061 default: 2062 ShouldNotReachHere(); 2063 } 2064 } 2065 2066 2067 void TemplateTable::lcmp() { 2068 transition(ltos, itos); 2069 #ifdef AARCH64 2070 const Register arg1 = R1_tmp; 2071 const Register arg2 = R0_tos; 2072 2073 __ pop_l(arg1); 2074 2075 __ cmp(arg1, arg2); 2076 __ cset(R0_tos, gt); // 1 if '>', else 0 2077 __ csinv(R0_tos, R0_tos, ZR, ge); // previous value if '>=', else -1 2078 #else 2079 const Register arg1_lo = R2_tmp; 2080 const Register arg1_hi = R3_tmp; 2081 const Register arg2_lo = R0_tos_lo; 2082 const Register arg2_hi = R1_tos_hi; 2083 const Register res = R4_tmp; 2084 2085 __ pop_l(arg1_lo, arg1_hi); 2086 2087 // long compare arg1 with arg2 2088 // result is -1/0/+1 if '<'/'='/'>' 2089 Label done; 2090 2091 __ mov (res, 0); 2092 __ cmp (arg1_hi, arg2_hi); 2093 __ mvn (res, 0, lt); 2094 __ mov (res, 1, gt); 2095 __ b(done, ne); 2096 __ cmp (arg1_lo, arg2_lo); 2097 __ mvn (res, 0, lo); 2098 __ mov (res, 1, hi); 2099 __ bind(done); 2100 __ mov (R0_tos, res); 2101 #endif // AARCH64 2102 } 2103 2104 2105 void TemplateTable::float_cmp(bool is_float, int unordered_result) { 2106 assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result"); 2107 2108 #ifdef AARCH64 2109 if (is_float) { 2110 transition(ftos, itos); 2111 __ pop_f(S1_tmp); 2112 __ fcmp_s(S1_tmp, S0_tos); 2113 } else { 2114 transition(dtos, itos); 2115 __ pop_d(D1_tmp); 2116 __ fcmp_d(D1_tmp, D0_tos); 2117 } 2118 2119 if (unordered_result < 0) { 2120 __ cset(R0_tos, gt); // 1 if '>', else 0 2121 __ csinv(R0_tos, R0_tos, ZR, ge); // previous value if '>=', else -1 2122 } else { 2123 __ cset(R0_tos, hi); // 1 if '>' or unordered, else 0 2124 __ csinv(R0_tos, R0_tos, ZR, pl); // previous value if '>=' or unordered, else -1 2125 } 2126 2127 #else 2128 2129 #ifdef __SOFTFP__ 2130 2131 if (is_float) { 2132 transition(ftos, itos); 2133 const Register Rx = R0; 2134 const Register Ry = R1; 2135 2136 __ mov(Ry, R0_tos); 2137 __ pop_i(Rx); 2138 2139 if (unordered_result == 1) { 2140 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry); 2141 } else { 2142 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry); 2143 } 2144 2145 } else { 2146 2147 transition(dtos, itos); 2148 const Register Rx_lo = R0; 2149 const Register Rx_hi = R1; 2150 const Register Ry_lo = R2; 2151 const Register Ry_hi = R3; 2152 2153 __ mov(Ry_lo, R0_tos_lo); 2154 __ mov(Ry_hi, R1_tos_hi); 2155 __ pop_l(Rx_lo, Rx_hi); 2156 2157 if (unordered_result == 1) { 2158 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi); 2159 } else { 2160 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi); 2161 } 2162 } 2163 2164 #else 2165 2166 if (is_float) { 2167 transition(ftos, itos); 2168 __ pop_f(S1_tmp); 2169 __ fcmps(S1_tmp, S0_tos); 2170 } else { 2171 transition(dtos, itos); 2172 __ pop_d(D1_tmp); 2173 __ fcmpd(D1_tmp, D0_tos); 2174 } 2175 2176 __ fmstat(); 2177 2178 // comparison result | flag N | flag Z | flag C | flag V 2179 // "<" | 1 | 0 | 0 | 0 2180 // "==" | 0 | 1 | 1 | 0 2181 // ">" | 0 | 0 | 1 | 0 2182 // unordered | 0 | 0 | 1 | 1 2183 2184 if (unordered_result < 0) { 2185 __ mov(R0_tos, 1); // result == 1 if greater 2186 __ mvn(R0_tos, 0, lt); // result == -1 if less or unordered (N!=V) 2187 } else { 2188 __ mov(R0_tos, 1); // result == 1 if greater or unordered 2189 __ mvn(R0_tos, 0, mi); // result == -1 if less (N=1) 2190 } 2191 __ mov(R0_tos, 0, eq); // result == 0 if equ (Z=1) 2192 #endif // __SOFTFP__ 2193 #endif // AARCH64 2194 } 2195 2196 2197 void TemplateTable::branch(bool is_jsr, bool is_wide) { 2198 2199 const Register Rdisp = R0_tmp; 2200 const Register Rbumped_taken_count = R5_tmp; 2201 2202 __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count 2203 2204 const ByteSize be_offset = MethodCounters::backedge_counter_offset() + 2205 InvocationCounter::counter_offset(); 2206 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + 2207 InvocationCounter::counter_offset(); 2208 const int method_offset = frame::interpreter_frame_method_offset * wordSize; 2209 2210 // Load up R0 with the branch displacement 2211 if (is_wide) { 2212 __ ldrsb(R0_tmp, at_bcp(1)); 2213 __ ldrb(R1_tmp, at_bcp(2)); 2214 __ ldrb(R2_tmp, at_bcp(3)); 2215 __ ldrb(R3_tmp, at_bcp(4)); 2216 __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte)); 2217 __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte)); 2218 __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte)); 2219 } else { 2220 __ ldrsb(R0_tmp, at_bcp(1)); 2221 __ ldrb(R1_tmp, at_bcp(2)); 2222 __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte)); 2223 } 2224 2225 // Handle all the JSR stuff here, then exit. 2226 // It's much shorter and cleaner than intermingling with the 2227 // non-JSR normal-branch stuff occuring below. 2228 if (is_jsr) { 2229 // compute return address as bci in R1 2230 const Register Rret_addr = R1_tmp; 2231 assert_different_registers(Rdisp, Rret_addr, Rtemp); 2232 2233 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); 2234 __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset())); 2235 __ sub(Rret_addr, Rret_addr, Rtemp); 2236 2237 // Load the next target bytecode into R3_bytecode and advance Rbcp 2238 #ifdef AARCH64 2239 __ add(Rbcp, Rbcp, Rdisp); 2240 __ ldrb(R3_bytecode, Address(Rbcp)); 2241 #else 2242 __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed)); 2243 #endif // AARCH64 2244 2245 // Push return address 2246 __ push_i(Rret_addr); 2247 // jsr returns vtos 2248 __ dispatch_only_noverify(vtos); 2249 return; 2250 } 2251 2252 // Normal (non-jsr) branch handling 2253 2254 // Adjust the bcp by the displacement in Rdisp and load next bytecode. 2255 #ifdef AARCH64 2256 __ add(Rbcp, Rbcp, Rdisp); 2257 __ ldrb(R3_bytecode, Address(Rbcp)); 2258 #else 2259 __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed)); 2260 #endif // AARCH64 2261 2262 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters"); 2263 Label backedge_counter_overflow; 2264 Label profile_method; 2265 Label dispatch; 2266 2267 if (UseLoopCounter) { 2268 // increment backedge counter for backward branches 2269 // Rdisp (R0): target offset 2270 2271 const Register Rcnt = R2_tmp; 2272 const Register Rcounters = R1_tmp; 2273 2274 // count only if backward branch 2275 #ifdef AARCH64 2276 __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM 2277 #else 2278 __ tst(Rdisp, Rdisp); 2279 __ b(dispatch, pl); 2280 #endif // AARCH64 2281 2282 if (TieredCompilation) { 2283 Label no_mdo; 2284 int increment = InvocationCounter::count_increment; 2285 if (ProfileInterpreter) { 2286 // Are we profiling? 2287 __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset())); 2288 __ cbz(Rtemp, no_mdo); 2289 // Increment the MDO backedge counter 2290 const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) + 2291 in_bytes(InvocationCounter::counter_offset())); 2292 const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset())); 2293 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, 2294 Rcnt, R4_tmp, eq, &backedge_counter_overflow); 2295 __ b(dispatch); 2296 } 2297 __ bind(no_mdo); 2298 // Increment backedge counter in MethodCounters* 2299 // Note Rbumped_taken_count is a callee saved registers for ARM32, but caller saved for ARM64 2300 __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/, 2301 Rdisp, R3_bytecode, 2302 AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg)); 2303 const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset())); 2304 __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask, 2305 Rcnt, R4_tmp, eq, &backedge_counter_overflow); 2306 } else { 2307 // Increment backedge counter in MethodCounters* 2308 __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/, 2309 Rdisp, R3_bytecode, 2310 AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg)); 2311 __ ldr_u32(Rtemp, Address(Rcounters, be_offset)); // load backedge counter 2312 __ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter 2313 __ str_32(Rtemp, Address(Rcounters, be_offset)); // store counter 2314 2315 __ ldr_u32(Rcnt, Address(Rcounters, inv_offset)); // load invocation counter 2316 #ifdef AARCH64 2317 __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value); // and the status bits 2318 #else 2319 __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value); // and the status bits 2320 #endif // AARCH64 2321 __ add(Rcnt, Rcnt, Rtemp); // add both counters 2322 2323 if (ProfileInterpreter) { 2324 // Test to see if we should create a method data oop 2325 const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset())); 2326 __ ldr_s32(Rtemp, profile_limit); 2327 __ cmp_32(Rcnt, Rtemp); 2328 __ b(dispatch, lt); 2329 2330 // if no method data exists, go to profile method 2331 __ test_method_data_pointer(R4_tmp, profile_method); 2332 2333 if (UseOnStackReplacement) { 2334 // check for overflow against Rbumped_taken_count, which is the MDO taken count 2335 const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())); 2336 __ ldr_s32(Rtemp, backward_branch_limit); 2337 __ cmp(Rbumped_taken_count, Rtemp); 2338 __ b(dispatch, lo); 2339 2340 // When ProfileInterpreter is on, the backedge_count comes from the 2341 // MethodData*, which value does not get reset on the call to 2342 // frequency_counter_overflow(). To avoid excessive calls to the overflow 2343 // routine while the method is being compiled, add a second test to make 2344 // sure the overflow function is called only once every overflow_frequency. 2345 const int overflow_frequency = 1024; 2346 2347 #ifdef AARCH64 2348 __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1)); 2349 #else 2350 // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0 2351 assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency"); 2352 __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22)); 2353 #endif // AARCH64 2354 2355 __ b(backedge_counter_overflow, eq); 2356 } 2357 } else { 2358 if (UseOnStackReplacement) { 2359 // check for overflow against Rcnt, which is the sum of the counters 2360 const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())); 2361 __ ldr_s32(Rtemp, backward_branch_limit); 2362 __ cmp_32(Rcnt, Rtemp); 2363 __ b(backedge_counter_overflow, hs); 2364 2365 } 2366 } 2367 } 2368 __ bind(dispatch); 2369 } 2370 2371 if (!UseOnStackReplacement) { 2372 __ bind(backedge_counter_overflow); 2373 } 2374 2375 // continue with the bytecode @ target 2376 __ dispatch_only(vtos); 2377 2378 if (UseLoopCounter) { 2379 if (ProfileInterpreter) { 2380 // Out-of-line code to allocate method data oop. 2381 __ bind(profile_method); 2382 2383 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); 2384 __ set_method_data_pointer_for_bcp(); 2385 // reload next bytecode 2386 __ ldrb(R3_bytecode, Address(Rbcp)); 2387 __ b(dispatch); 2388 } 2389 2390 if (UseOnStackReplacement) { 2391 // invocation counter overflow 2392 __ bind(backedge_counter_overflow); 2393 2394 __ sub(R1, Rbcp, Rdisp); // branch bcp 2395 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1); 2396 2397 // R0: osr nmethod (osr ok) or NULL (osr not possible) 2398 const Register Rnmethod = R0; 2399 2400 __ ldrb(R3_bytecode, Address(Rbcp)); // reload next bytecode 2401 2402 __ cbz(Rnmethod, dispatch); // test result, no osr if null 2403 2404 // nmethod may have been invalidated (VM may block upon call_VM return) 2405 __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset())); 2406 __ cmp(R1_tmp, nmethod::in_use); 2407 __ b(dispatch, ne); 2408 2409 // We have the address of an on stack replacement routine in Rnmethod, 2410 // We need to prepare to execute the OSR method. First we must 2411 // migrate the locals and monitors off of the stack. 2412 2413 __ mov(Rtmp_save0, Rnmethod); // save the nmethod 2414 2415 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); 2416 2417 // R0 is OSR buffer 2418 2419 __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset())); 2420 __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize)); 2421 2422 #ifdef AARCH64 2423 __ ldp(FP, LR, Address(FP)); 2424 __ mov(SP, Rtemp); 2425 #else 2426 __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR)); 2427 __ bic(SP, Rtemp, StackAlignmentInBytes - 1); // Remove frame and align stack 2428 #endif // AARCH64 2429 2430 __ jump(R1_tmp); 2431 } 2432 } 2433 } 2434 2435 2436 void TemplateTable::if_0cmp(Condition cc) { 2437 transition(itos, vtos); 2438 // assume branch is more often taken than not (loops use backward branches) 2439 Label not_taken; 2440 #ifdef AARCH64 2441 if (cc == equal) { 2442 __ cbnz_w(R0_tos, not_taken); 2443 } else if (cc == not_equal) { 2444 __ cbz_w(R0_tos, not_taken); 2445 } else { 2446 __ cmp_32(R0_tos, 0); 2447 __ b(not_taken, convNegCond(cc)); 2448 } 2449 #else 2450 __ cmp_32(R0_tos, 0); 2451 __ b(not_taken, convNegCond(cc)); 2452 #endif // AARCH64 2453 branch(false, false); 2454 __ bind(not_taken); 2455 __ profile_not_taken_branch(R0_tmp); 2456 } 2457 2458 2459 void TemplateTable::if_icmp(Condition cc) { 2460 transition(itos, vtos); 2461 // assume branch is more often taken than not (loops use backward branches) 2462 Label not_taken; 2463 __ pop_i(R1_tmp); 2464 __ cmp_32(R1_tmp, R0_tos); 2465 __ b(not_taken, convNegCond(cc)); 2466 branch(false, false); 2467 __ bind(not_taken); 2468 __ profile_not_taken_branch(R0_tmp); 2469 } 2470 2471 2472 void TemplateTable::if_nullcmp(Condition cc) { 2473 transition(atos, vtos); 2474 assert(cc == equal || cc == not_equal, "invalid condition"); 2475 2476 // assume branch is more often taken than not (loops use backward branches) 2477 Label not_taken; 2478 if (cc == equal) { 2479 __ cbnz(R0_tos, not_taken); 2480 } else { 2481 __ cbz(R0_tos, not_taken); 2482 } 2483 branch(false, false); 2484 __ bind(not_taken); 2485 __ profile_not_taken_branch(R0_tmp); 2486 } 2487 2488 2489 void TemplateTable::if_acmp(Condition cc) { 2490 transition(atos, vtos); 2491 // assume branch is more often taken than not (loops use backward branches) 2492 Label not_taken; 2493 __ pop_ptr(R1_tmp); 2494 __ cmp(R1_tmp, R0_tos); 2495 __ b(not_taken, convNegCond(cc)); 2496 branch(false, false); 2497 __ bind(not_taken); 2498 __ profile_not_taken_branch(R0_tmp); 2499 } 2500 2501 2502 void TemplateTable::ret() { 2503 transition(vtos, vtos); 2504 const Register Rlocal_index = R1_tmp; 2505 const Register Rret_bci = Rtmp_save0; // R4/R19 2506 2507 locals_index(Rlocal_index); 2508 Address local = load_iaddress(Rlocal_index, Rtemp); 2509 __ ldr_s32(Rret_bci, local); // get return bci, compute return bcp 2510 __ profile_ret(Rtmp_save1, Rret_bci); 2511 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); 2512 __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset())); 2513 __ add(Rbcp, Rtemp, Rret_bci); 2514 __ dispatch_next(vtos); 2515 } 2516 2517 2518 void TemplateTable::wide_ret() { 2519 transition(vtos, vtos); 2520 const Register Rlocal_index = R1_tmp; 2521 const Register Rret_bci = Rtmp_save0; // R4/R19 2522 2523 locals_index_wide(Rlocal_index); 2524 Address local = load_iaddress(Rlocal_index, Rtemp); 2525 __ ldr_s32(Rret_bci, local); // get return bci, compute return bcp 2526 __ profile_ret(Rtmp_save1, Rret_bci); 2527 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); 2528 __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset())); 2529 __ add(Rbcp, Rtemp, Rret_bci); 2530 __ dispatch_next(vtos); 2531 } 2532 2533 2534 void TemplateTable::tableswitch() { 2535 transition(itos, vtos); 2536 2537 const Register Rindex = R0_tos; 2538 #ifndef AARCH64 2539 const Register Rtemp2 = R1_tmp; 2540 #endif // !AARCH64 2541 const Register Rabcp = R2_tmp; // aligned bcp 2542 const Register Rlow = R3_tmp; 2543 const Register Rhigh = R4_tmp; 2544 const Register Roffset = R5_tmp; 2545 2546 // align bcp 2547 __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1)); 2548 __ align_reg(Rabcp, Rtemp, BytesPerInt); 2549 2550 // load lo & hi 2551 #ifdef AARCH64 2552 __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed)); 2553 #else 2554 __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback); 2555 #endif // AARCH64 2556 __ byteswap_u32(Rlow, Rtemp, Rtemp2); 2557 __ byteswap_u32(Rhigh, Rtemp, Rtemp2); 2558 2559 // compare index with high bound 2560 __ cmp_32(Rhigh, Rindex); 2561 2562 #ifdef AARCH64 2563 Label default_case, do_dispatch; 2564 __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge); 2565 __ b(default_case, lt); 2566 2567 __ sub_w(Rindex, Rindex, Rlow); 2568 __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt)); 2569 if(ProfileInterpreter) { 2570 __ sxtw(Rindex, Rindex); 2571 __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp); 2572 } 2573 __ b(do_dispatch); 2574 2575 __ bind(default_case); 2576 __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt)); 2577 if(ProfileInterpreter) { 2578 __ profile_switch_default(R0_tmp); 2579 } 2580 2581 __ bind(do_dispatch); 2582 #else 2583 2584 // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow) 2585 __ subs(Rindex, Rindex, Rlow, ge); 2586 2587 // if Rindex <= Rhigh and (Rindex - Rlow) >= 0 2588 // ("ge" status accumulated from cmp and subs instructions) then load 2589 // offset from table, otherwise load offset for default case 2590 2591 if(ProfileInterpreter) { 2592 Label default_case, continue_execution; 2593 2594 __ b(default_case, lt); 2595 __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt)); 2596 __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp); 2597 __ b(continue_execution); 2598 2599 __ bind(default_case); 2600 __ profile_switch_default(R0_tmp); 2601 __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt)); 2602 2603 __ bind(continue_execution); 2604 } else { 2605 __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt); 2606 __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge); 2607 } 2608 #endif // AARCH64 2609 2610 __ byteswap_u32(Roffset, Rtemp, Rtemp2); 2611 2612 // load the next bytecode to R3_bytecode and advance Rbcp 2613 #ifdef AARCH64 2614 __ add(Rbcp, Rbcp, Roffset, ex_sxtw); 2615 __ ldrb(R3_bytecode, Address(Rbcp)); 2616 #else 2617 __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed)); 2618 #endif // AARCH64 2619 __ dispatch_only(vtos); 2620 2621 } 2622 2623 2624 void TemplateTable::lookupswitch() { 2625 transition(itos, itos); 2626 __ stop("lookupswitch bytecode should have been rewritten"); 2627 } 2628 2629 2630 void TemplateTable::fast_linearswitch() { 2631 transition(itos, vtos); 2632 Label loop, found, default_case, continue_execution; 2633 2634 const Register Rkey = R0_tos; 2635 const Register Rabcp = R2_tmp; // aligned bcp 2636 const Register Rdefault = R3_tmp; 2637 const Register Rcount = R4_tmp; 2638 const Register Roffset = R5_tmp; 2639 2640 // bswap Rkey, so we can avoid bswapping the table entries 2641 __ byteswap_u32(Rkey, R1_tmp, Rtemp); 2642 2643 // align bcp 2644 __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1)); 2645 __ align_reg(Rabcp, Rtemp, BytesPerInt); 2646 2647 // load default & counter 2648 #ifdef AARCH64 2649 __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed)); 2650 #else 2651 __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback); 2652 #endif // AARCH64 2653 __ byteswap_u32(Rcount, R1_tmp, Rtemp); 2654 2655 #ifdef AARCH64 2656 __ cbz_w(Rcount, default_case); 2657 #else 2658 __ cmp_32(Rcount, 0); 2659 __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne); 2660 __ b(default_case, eq); 2661 #endif // AARCH64 2662 2663 // table search 2664 __ bind(loop); 2665 #ifdef AARCH64 2666 __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed)); 2667 #endif // AARCH64 2668 __ cmp_32(Rtemp, Rkey); 2669 __ b(found, eq); 2670 __ subs(Rcount, Rcount, 1); 2671 #ifndef AARCH64 2672 __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne); 2673 #endif // !AARCH64 2674 __ b(loop, ne); 2675 2676 // default case 2677 __ bind(default_case); 2678 __ profile_switch_default(R0_tmp); 2679 __ mov(Roffset, Rdefault); 2680 __ b(continue_execution); 2681 2682 // entry found -> get offset 2683 __ bind(found); 2684 // Rabcp is already incremented and points to the next entry 2685 __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt)); 2686 if (ProfileInterpreter) { 2687 // Calculate index of the selected case. 2688 assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp); 2689 2690 // align bcp 2691 __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1)); 2692 __ align_reg(R2_tmp, Rtemp, BytesPerInt); 2693 2694 // load number of cases 2695 __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt)); 2696 __ byteswap_u32(R2_tmp, R1_tmp, Rtemp); 2697 2698 // Selected index = <number of cases> - <current loop count> 2699 __ sub(R1_tmp, R2_tmp, Rcount); 2700 __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp); 2701 } 2702 2703 // continue execution 2704 __ bind(continue_execution); 2705 __ byteswap_u32(Roffset, R1_tmp, Rtemp); 2706 2707 // load the next bytecode to R3_bytecode and advance Rbcp 2708 #ifdef AARCH64 2709 __ add(Rbcp, Rbcp, Roffset, ex_sxtw); 2710 __ ldrb(R3_bytecode, Address(Rbcp)); 2711 #else 2712 __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed)); 2713 #endif // AARCH64 2714 __ dispatch_only(vtos); 2715 } 2716 2717 2718 void TemplateTable::fast_binaryswitch() { 2719 transition(itos, vtos); 2720 // Implementation using the following core algorithm: 2721 // 2722 // int binary_search(int key, LookupswitchPair* array, int n) { 2723 // // Binary search according to "Methodik des Programmierens" by 2724 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. 2725 // int i = 0; 2726 // int j = n; 2727 // while (i+1 < j) { 2728 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) 2729 // // with Q: for all i: 0 <= i < n: key < a[i] 2730 // // where a stands for the array and assuming that the (inexisting) 2731 // // element a[n] is infinitely big. 2732 // int h = (i + j) >> 1; 2733 // // i < h < j 2734 // if (key < array[h].fast_match()) { 2735 // j = h; 2736 // } else { 2737 // i = h; 2738 // } 2739 // } 2740 // // R: a[i] <= key < a[i+1] or Q 2741 // // (i.e., if key is within array, i is the correct index) 2742 // return i; 2743 // } 2744 2745 // register allocation 2746 const Register key = R0_tos; // already set (tosca) 2747 const Register array = R1_tmp; 2748 const Register i = R2_tmp; 2749 const Register j = R3_tmp; 2750 const Register h = R4_tmp; 2751 const Register val = R5_tmp; 2752 const Register temp1 = Rtemp; 2753 const Register temp2 = LR_tmp; 2754 const Register offset = R3_tmp; 2755 2756 // set 'array' = aligned bcp + 2 ints 2757 __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt); 2758 __ align_reg(array, temp1, BytesPerInt); 2759 2760 // initialize i & j 2761 __ mov(i, 0); // i = 0; 2762 __ ldr_s32(j, Address(array, -BytesPerInt)); // j = length(array); 2763 // Convert j into native byteordering 2764 __ byteswap_u32(j, temp1, temp2); 2765 2766 // and start 2767 Label entry; 2768 __ b(entry); 2769 2770 // binary search loop 2771 { Label loop; 2772 __ bind(loop); 2773 // int h = (i + j) >> 1; 2774 __ add(h, i, j); // h = i + j; 2775 __ logical_shift_right(h, h, 1); // h = (i + j) >> 1; 2776 // if (key < array[h].fast_match()) { 2777 // j = h; 2778 // } else { 2779 // i = h; 2780 // } 2781 #ifdef AARCH64 2782 __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt)); 2783 __ ldr_s32(val, Address(temp1)); 2784 #else 2785 __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt)); 2786 #endif // AARCH64 2787 // Convert array[h].match to native byte-ordering before compare 2788 __ byteswap_u32(val, temp1, temp2); 2789 __ cmp_32(key, val); 2790 __ mov(j, h, lt); // j = h if (key < array[h].fast_match()) 2791 __ mov(i, h, ge); // i = h if (key >= array[h].fast_match()) 2792 // while (i+1 < j) 2793 __ bind(entry); 2794 __ add(temp1, i, 1); // i+1 2795 __ cmp(temp1, j); // i+1 < j 2796 __ b(loop, lt); 2797 } 2798 2799 // end of binary search, result index is i (must check again!) 2800 Label default_case; 2801 // Convert array[i].match to native byte-ordering before compare 2802 #ifdef AARCH64 2803 __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt)); 2804 __ ldr_s32(val, Address(temp1)); 2805 #else 2806 __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt)); 2807 #endif // AARCH64 2808 __ byteswap_u32(val, temp1, temp2); 2809 __ cmp_32(key, val); 2810 __ b(default_case, ne); 2811 2812 // entry found 2813 __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt)); 2814 __ ldr_s32(offset, Address(temp1, 1*BytesPerInt)); 2815 __ profile_switch_case(R0, i, R1, i); 2816 __ byteswap_u32(offset, temp1, temp2); 2817 #ifdef AARCH64 2818 __ add(Rbcp, Rbcp, offset, ex_sxtw); 2819 __ ldrb(R3_bytecode, Address(Rbcp)); 2820 #else 2821 __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed)); 2822 #endif // AARCH64 2823 __ dispatch_only(vtos); 2824 2825 // default case 2826 __ bind(default_case); 2827 __ profile_switch_default(R0); 2828 __ ldr_s32(offset, Address(array, -2*BytesPerInt)); 2829 __ byteswap_u32(offset, temp1, temp2); 2830 #ifdef AARCH64 2831 __ add(Rbcp, Rbcp, offset, ex_sxtw); 2832 __ ldrb(R3_bytecode, Address(Rbcp)); 2833 #else 2834 __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed)); 2835 #endif // AARCH64 2836 __ dispatch_only(vtos); 2837 } 2838 2839 2840 void TemplateTable::_return(TosState state) { 2841 transition(state, state); 2842 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation 2843 2844 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { 2845 Label skip_register_finalizer; 2846 assert(state == vtos, "only valid state"); 2847 __ ldr(R1, aaddress(0)); 2848 __ load_klass(Rtemp, R1); 2849 __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset())); 2850 __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer); 2851 2852 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1); 2853 2854 __ bind(skip_register_finalizer); 2855 } 2856 2857 // Narrow result if state is itos but result type is smaller. 2858 // Need to narrow in the return bytecode rather than in generate_return_entry 2859 // since compiled code callers expect the result to already be narrowed. 2860 if (state == itos) { 2861 __ narrow(R0_tos); 2862 } 2863 __ remove_activation(state, LR); 2864 2865 __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__); 2866 2867 #ifndef AARCH64 2868 // According to interpreter calling conventions, result is returned in R0/R1, 2869 // so ftos (S0) and dtos (D0) are moved to R0/R1. 2870 // This conversion should be done after remove_activation, as it uses 2871 // push(state) & pop(state) to preserve return value. 2872 __ convert_tos_to_retval(state); 2873 #endif // !AARCH64 2874 2875 __ ret(); 2876 2877 __ nop(); // to avoid filling CPU pipeline with invalid instructions 2878 __ nop(); 2879 } 2880 2881 2882 // ---------------------------------------------------------------------------- 2883 // Volatile variables demand their effects be made known to all CPU's in 2884 // order. Store buffers on most chips allow reads & writes to reorder; the 2885 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of 2886 // memory barrier (i.e., it's not sufficient that the interpreter does not 2887 // reorder volatile references, the hardware also must not reorder them). 2888 // 2889 // According to the new Java Memory Model (JMM): 2890 // (1) All volatiles are serialized wrt to each other. 2891 // ALSO reads & writes act as aquire & release, so: 2892 // (2) A read cannot let unrelated NON-volatile memory refs that happen after 2893 // the read float up to before the read. It's OK for non-volatile memory refs 2894 // that happen before the volatile read to float down below it. 2895 // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs 2896 // that happen BEFORE the write float down to after the write. It's OK for 2897 // non-volatile memory refs that happen after the volatile write to float up 2898 // before it. 2899 // 2900 // We only put in barriers around volatile refs (they are expensive), not 2901 // _between_ memory refs (that would require us to track the flavor of the 2902 // previous memory refs). Requirements (2) and (3) require some barriers 2903 // before volatile stores and after volatile loads. These nearly cover 2904 // requirement (1) but miss the volatile-store-volatile-load case. This final 2905 // case is placed after volatile-stores although it could just as well go 2906 // before volatile-loads. 2907 // TODO-AARCH64: consider removing extra unused parameters 2908 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint, 2909 Register tmp, 2910 bool preserve_flags, 2911 Register load_tgt) { 2912 #ifdef AARCH64 2913 __ membar(order_constraint); 2914 #else 2915 __ membar(order_constraint, tmp, preserve_flags, load_tgt); 2916 #endif 2917 } 2918 2919 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR. 2920 void TemplateTable::resolve_cache_and_index(int byte_no, 2921 Register Rcache, 2922 Register Rindex, 2923 size_t index_size) { 2924 assert_different_registers(Rcache, Rindex, Rtemp); 2925 2926 Label resolved; 2927 Bytecodes::Code code = bytecode(); 2928 switch (code) { 2929 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; 2930 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; 2931 } 2932 2933 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); 2934 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size); 2935 __ cmp(Rtemp, code); // have we resolved this bytecode? 2936 __ b(resolved, eq); 2937 2938 // resolve first time through 2939 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache); 2940 __ mov(R1, code); 2941 __ call_VM(noreg, entry, R1); 2942 // Update registers with resolved info 2943 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size); 2944 __ bind(resolved); 2945 } 2946 2947 2948 // The Rcache and Rindex registers must be set before call 2949 void TemplateTable::load_field_cp_cache_entry(Register Rcache, 2950 Register Rindex, 2951 Register Roffset, 2952 Register Rflags, 2953 Register Robj, 2954 bool is_static = false) { 2955 2956 assert_different_registers(Rcache, Rindex, Rtemp); 2957 assert_different_registers(Roffset, Rflags, Robj, Rtemp); 2958 2959 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2960 2961 __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 2962 2963 // Field offset 2964 __ ldr(Roffset, Address(Rtemp, 2965 cp_base_offset + ConstantPoolCacheEntry::f2_offset())); 2966 2967 // Flags 2968 __ ldr_u32(Rflags, Address(Rtemp, 2969 cp_base_offset + ConstantPoolCacheEntry::flags_offset())); 2970 2971 if (is_static) { 2972 __ ldr(Robj, Address(Rtemp, 2973 cp_base_offset + ConstantPoolCacheEntry::f1_offset())); 2974 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 2975 __ ldr(Robj, Address(Robj, mirror_offset)); 2976 __ resolve_oop_handle(Robj); 2977 } 2978 } 2979 2980 2981 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR. 2982 void TemplateTable::load_invoke_cp_cache_entry(int byte_no, 2983 Register method, 2984 Register itable_index, 2985 Register flags, 2986 bool is_invokevirtual, 2987 bool is_invokevfinal/*unused*/, 2988 bool is_invokedynamic) { 2989 // setup registers 2990 const Register cache = R2_tmp; 2991 const Register index = R3_tmp; 2992 const Register temp_reg = Rtemp; 2993 assert_different_registers(cache, index, temp_reg); 2994 assert_different_registers(method, itable_index, temp_reg); 2995 2996 // determine constant pool cache field offsets 2997 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); 2998 const int method_offset = in_bytes( 2999 ConstantPoolCache::base_offset() + 3000 ((byte_no == f2_byte) 3001 ? ConstantPoolCacheEntry::f2_offset() 3002 : ConstantPoolCacheEntry::f1_offset() 3003 ) 3004 ); 3005 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() + 3006 ConstantPoolCacheEntry::flags_offset()); 3007 // access constant pool cache fields 3008 const int index_offset = in_bytes(ConstantPoolCache::base_offset() + 3009 ConstantPoolCacheEntry::f2_offset()); 3010 3011 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2)); 3012 resolve_cache_and_index(byte_no, cache, index, index_size); 3013 __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord)); 3014 __ ldr(method, Address(temp_reg, method_offset)); 3015 3016 if (itable_index != noreg) { 3017 __ ldr(itable_index, Address(temp_reg, index_offset)); 3018 } 3019 __ ldr_u32(flags, Address(temp_reg, flags_offset)); 3020 } 3021 3022 3023 // The registers cache and index expected to be set before call, and should not be Rtemp. 3024 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR, 3025 // except cache and index registers which are preserved. 3026 void TemplateTable::jvmti_post_field_access(Register Rcache, 3027 Register Rindex, 3028 bool is_static, 3029 bool has_tos) { 3030 assert_different_registers(Rcache, Rindex, Rtemp); 3031 3032 if (__ can_post_field_access()) { 3033 // Check to see if a field access watch has been set before we take 3034 // the time to call into the VM. 3035 3036 Label Lcontinue; 3037 3038 __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr()); 3039 __ cbz(Rtemp, Lcontinue); 3040 3041 // cache entry pointer 3042 __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3043 __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset())); 3044 if (is_static) { 3045 __ mov(R1, 0); // NULL object reference 3046 } else { 3047 __ pop(atos); // Get the object 3048 __ mov(R1, R0_tos); 3049 __ verify_oop(R1); 3050 __ push(atos); // Restore stack state 3051 } 3052 // R1: object pointer or NULL 3053 // R2: cache entry pointer 3054 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), 3055 R1, R2); 3056 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1); 3057 3058 __ bind(Lcontinue); 3059 } 3060 } 3061 3062 3063 void TemplateTable::pop_and_check_object(Register r) { 3064 __ pop_ptr(r); 3065 __ null_check(r, Rtemp); // for field access must check obj. 3066 __ verify_oop(r); 3067 } 3068 3069 3070 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 3071 transition(vtos, vtos); 3072 3073 const Register Roffset = R2_tmp; 3074 const Register Robj = R3_tmp; 3075 const Register Rcache = R4_tmp; 3076 const Register Rflagsav = Rtmp_save0; // R4/R19 3077 const Register Rindex = R5_tmp; 3078 const Register Rflags = R5_tmp; 3079 3080 const bool gen_volatile_check = os::is_MP(); 3081 3082 resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2)); 3083 jvmti_post_field_access(Rcache, Rindex, is_static, false); 3084 load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static); 3085 3086 if (gen_volatile_check) { 3087 __ mov(Rflagsav, Rflags); 3088 } 3089 3090 if (!is_static) pop_and_check_object(Robj); 3091 3092 Label Done, Lint, Ltable, shouldNotReachHere; 3093 Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos; 3094 3095 // compute type 3096 __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift); 3097 // Make sure we don't need to mask flags after the above shift 3098 ConstantPoolCacheEntry::verify_tos_state_shift(); 3099 3100 // There are actually two versions of implementation of getfield/getstatic: 3101 // 3102 // 32-bit ARM: 3103 // 1) Table switch using add(PC,...) instruction (fast_version) 3104 // 2) Table switch using ldr(PC,...) instruction 3105 // 3106 // AArch64: 3107 // 1) Table switch using adr/add/br instructions (fast_version) 3108 // 2) Table switch using adr/ldr/br instructions 3109 // 3110 // First version requires fixed size of code block for each case and 3111 // can not be used in RewriteBytecodes and VerifyOops 3112 // modes. 3113 3114 // Size of fixed size code block for fast_version 3115 const int log_max_block_size = 2; 3116 const int max_block_size = 1 << log_max_block_size; 3117 3118 // Decide if fast version is enabled 3119 bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop; 3120 3121 // On 32-bit ARM atos and itos cases can be merged only for fast version, because 3122 // atos requires additional processing in slow version. 3123 // On AArch64 atos and itos cannot be merged. 3124 bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version); 3125 3126 assert(number_of_states == 10, "number of tos states should be equal to 9"); 3127 3128 __ cmp(Rflags, itos); 3129 #ifdef AARCH64 3130 __ b(Lint, eq); 3131 3132 if(fast_version) { 3133 __ adr(Rtemp, Lbtos); 3134 __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize)); 3135 __ br(Rtemp); 3136 } else { 3137 __ adr(Rtemp, Ltable); 3138 __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags)); 3139 __ br(Rtemp); 3140 } 3141 #else 3142 if(atos_merged_with_itos) { 3143 __ cmp(Rflags, atos, ne); 3144 } 3145 3146 // table switch by type 3147 if(fast_version) { 3148 __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne); 3149 } else { 3150 __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne); 3151 } 3152 3153 // jump to itos/atos case 3154 __ b(Lint); 3155 #endif // AARCH64 3156 3157 // table with addresses for slow version 3158 if (fast_version) { 3159 // nothing to do 3160 } else { 3161 AARCH64_ONLY(__ align(wordSize)); 3162 __ bind(Ltable); 3163 __ emit_address(Lbtos); 3164 __ emit_address(Lztos); 3165 __ emit_address(Lctos); 3166 __ emit_address(Lstos); 3167 __ emit_address(Litos); 3168 __ emit_address(Lltos); 3169 __ emit_address(Lftos); 3170 __ emit_address(Ldtos); 3171 __ emit_address(Latos); 3172 } 3173 3174 #ifdef ASSERT 3175 int seq = 0; 3176 #endif 3177 // btos 3178 { 3179 assert(btos == seq++, "btos has unexpected value"); 3180 FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version); 3181 __ bind(Lbtos); 3182 __ ldrsb(R0_tos, Address(Robj, Roffset)); 3183 __ push(btos); 3184 // Rewrite bytecode to be faster 3185 if (!is_static && rc == may_rewrite) { 3186 patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp); 3187 } 3188 __ b(Done); 3189 } 3190 3191 // ztos (same as btos for getfield) 3192 { 3193 assert(ztos == seq++, "btos has unexpected value"); 3194 FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version); 3195 __ bind(Lztos); 3196 __ ldrsb(R0_tos, Address(Robj, Roffset)); 3197 __ push(ztos); 3198 // Rewrite bytecode to be faster (use btos fast getfield) 3199 if (!is_static && rc == may_rewrite) { 3200 patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp); 3201 } 3202 __ b(Done); 3203 } 3204 3205 // ctos 3206 { 3207 assert(ctos == seq++, "ctos has unexpected value"); 3208 FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version); 3209 __ bind(Lctos); 3210 __ ldrh(R0_tos, Address(Robj, Roffset)); 3211 __ push(ctos); 3212 if (!is_static && rc == may_rewrite) { 3213 patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp); 3214 } 3215 __ b(Done); 3216 } 3217 3218 // stos 3219 { 3220 assert(stos == seq++, "stos has unexpected value"); 3221 FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version); 3222 __ bind(Lstos); 3223 __ ldrsh(R0_tos, Address(Robj, Roffset)); 3224 __ push(stos); 3225 if (!is_static && rc == may_rewrite) { 3226 patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp); 3227 } 3228 __ b(Done); 3229 } 3230 3231 // itos 3232 { 3233 assert(itos == seq++, "itos has unexpected value"); 3234 FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version); 3235 __ bind(Litos); 3236 __ b(shouldNotReachHere); 3237 } 3238 3239 // ltos 3240 { 3241 assert(ltos == seq++, "ltos has unexpected value"); 3242 FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version); 3243 __ bind(Lltos); 3244 #ifdef AARCH64 3245 __ ldr(R0_tos, Address(Robj, Roffset)); 3246 #else 3247 __ add(Roffset, Robj, Roffset); 3248 __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); 3249 #endif // AARCH64 3250 __ push(ltos); 3251 if (!is_static && rc == may_rewrite) { 3252 patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp); 3253 } 3254 __ b(Done); 3255 } 3256 3257 // ftos 3258 { 3259 assert(ftos == seq++, "ftos has unexpected value"); 3260 FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version); 3261 __ bind(Lftos); 3262 // floats and ints are placed on stack in same way, so 3263 // we can use push(itos) to transfer value without using VFP 3264 __ ldr_u32(R0_tos, Address(Robj, Roffset)); 3265 __ push(itos); 3266 if (!is_static && rc == may_rewrite) { 3267 patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp); 3268 } 3269 __ b(Done); 3270 } 3271 3272 // dtos 3273 { 3274 assert(dtos == seq++, "dtos has unexpected value"); 3275 FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version); 3276 __ bind(Ldtos); 3277 // doubles and longs are placed on stack in the same way, so 3278 // we can use push(ltos) to transfer value without using VFP 3279 #ifdef AARCH64 3280 __ ldr(R0_tos, Address(Robj, Roffset)); 3281 #else 3282 __ add(Rtemp, Robj, Roffset); 3283 __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi)); 3284 #endif // AARCH64 3285 __ push(ltos); 3286 if (!is_static && rc == may_rewrite) { 3287 patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp); 3288 } 3289 __ b(Done); 3290 } 3291 3292 // atos 3293 { 3294 assert(atos == seq++, "atos has unexpected value"); 3295 3296 // atos case for AArch64 and slow version on 32-bit ARM 3297 if(!atos_merged_with_itos) { 3298 __ bind(Latos); 3299 __ load_heap_oop(R0_tos, Address(Robj, Roffset)); 3300 __ push(atos); 3301 // Rewrite bytecode to be faster 3302 if (!is_static && rc == may_rewrite) { 3303 patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp); 3304 } 3305 __ b(Done); 3306 } 3307 } 3308 3309 assert(vtos == seq++, "vtos has unexpected value"); 3310 3311 __ bind(shouldNotReachHere); 3312 __ should_not_reach_here(); 3313 3314 // itos and atos cases are frequent so it makes sense to move them out of table switch 3315 // atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only 3316 3317 __ bind(Lint); 3318 __ ldr_s32(R0_tos, Address(Robj, Roffset)); 3319 __ push(itos); 3320 // Rewrite bytecode to be faster 3321 if (!is_static && rc == may_rewrite) { 3322 patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp); 3323 } 3324 3325 __ bind(Done); 3326 3327 if (gen_volatile_check) { 3328 // Check for volatile field 3329 Label notVolatile; 3330 __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3331 3332 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); 3333 3334 __ bind(notVolatile); 3335 } 3336 3337 } 3338 3339 void TemplateTable::getfield(int byte_no) { 3340 getfield_or_static(byte_no, false); 3341 } 3342 3343 void TemplateTable::nofast_getfield(int byte_no) { 3344 getfield_or_static(byte_no, false, may_not_rewrite); 3345 } 3346 3347 void TemplateTable::getstatic(int byte_no) { 3348 getfield_or_static(byte_no, true); 3349 } 3350 3351 3352 // The registers cache and index expected to be set before call, and should not be R1 or Rtemp. 3353 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR, 3354 // except cache and index registers which are preserved. 3355 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) { 3356 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3357 assert_different_registers(Rcache, Rindex, R1, Rtemp); 3358 3359 if (__ can_post_field_modification()) { 3360 // Check to see if a field modification watch has been set before we take 3361 // the time to call into the VM. 3362 Label Lcontinue; 3363 3364 __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr()); 3365 __ cbz(Rtemp, Lcontinue); 3366 3367 if (is_static) { 3368 // Life is simple. Null out the object pointer. 3369 __ mov(R1, 0); 3370 } else { 3371 // Life is harder. The stack holds the value on top, followed by the object. 3372 // We don't know the size of the value, though; it could be one or two words 3373 // depending on its type. As a result, we must find the type to determine where 3374 // the object is. 3375 3376 __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3377 __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset())); 3378 3379 __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift); 3380 // Make sure we don't need to mask Rtemp after the above shift 3381 ConstantPoolCacheEntry::verify_tos_state_shift(); 3382 3383 __ cmp(Rtemp, ltos); 3384 __ cond_cmp(Rtemp, dtos, ne); 3385 #ifdef AARCH64 3386 __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2)); 3387 __ mov(R1, Interpreter::expr_offset_in_bytes(1)); 3388 __ mov(R1, Rtemp, eq); 3389 __ ldr(R1, Address(Rstack_top, R1)); 3390 #else 3391 // two word value (ltos/dtos) 3392 __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq); 3393 3394 // one word value (not ltos, dtos) 3395 __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne); 3396 #endif // AARCH64 3397 } 3398 3399 // cache entry pointer 3400 __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3401 __ add(R2, R2, in_bytes(cp_base_offset)); 3402 3403 // object (tos) 3404 __ mov(R3, Rstack_top); 3405 3406 // R1: object pointer set up above (NULL if static) 3407 // R2: cache entry pointer 3408 // R3: value object on the stack 3409 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), 3410 R1, R2, R3); 3411 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1); 3412 3413 __ bind(Lcontinue); 3414 } 3415 } 3416 3417 3418 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 3419 transition(vtos, vtos); 3420 3421 const Register Roffset = R2_tmp; 3422 const Register Robj = R3_tmp; 3423 const Register Rcache = R4_tmp; 3424 const Register Rflagsav = Rtmp_save0; // R4/R19 3425 const Register Rindex = R5_tmp; 3426 const Register Rflags = R5_tmp; 3427 3428 const bool gen_volatile_check = os::is_MP(); 3429 3430 resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2)); 3431 jvmti_post_field_mod(Rcache, Rindex, is_static); 3432 load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static); 3433 3434 if (gen_volatile_check) { 3435 // Check for volatile field 3436 Label notVolatile; 3437 __ mov(Rflagsav, Rflags); 3438 __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3439 3440 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp); 3441 3442 __ bind(notVolatile); 3443 } 3444 3445 Label Done, Lint, shouldNotReachHere; 3446 Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos; 3447 3448 // compute type 3449 __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift); 3450 // Make sure we don't need to mask flags after the above shift 3451 ConstantPoolCacheEntry::verify_tos_state_shift(); 3452 3453 // There are actually two versions of implementation of putfield/putstatic: 3454 // 3455 // 32-bit ARM: 3456 // 1) Table switch using add(PC,...) instruction (fast_version) 3457 // 2) Table switch using ldr(PC,...) instruction 3458 // 3459 // AArch64: 3460 // 1) Table switch using adr/add/br instructions (fast_version) 3461 // 2) Table switch using adr/ldr/br instructions 3462 // 3463 // First version requires fixed size of code block for each case and 3464 // can not be used in RewriteBytecodes and VerifyOops 3465 // modes. 3466 3467 // Size of fixed size code block for fast_version (in instructions) 3468 const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3); 3469 const int max_block_size = 1 << log_max_block_size; 3470 3471 // Decide if fast version is enabled 3472 bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits; 3473 3474 assert(number_of_states == 10, "number of tos states should be equal to 9"); 3475 3476 // itos case is frequent and is moved outside table switch 3477 __ cmp(Rflags, itos); 3478 3479 #ifdef AARCH64 3480 __ b(Lint, eq); 3481 3482 if (fast_version) { 3483 __ adr(Rtemp, Lbtos); 3484 __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize)); 3485 __ br(Rtemp); 3486 } else { 3487 __ adr(Rtemp, Ltable); 3488 __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags)); 3489 __ br(Rtemp); 3490 } 3491 #else 3492 // table switch by type 3493 if (fast_version) { 3494 __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne); 3495 } else { 3496 __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne); 3497 } 3498 3499 // jump to itos case 3500 __ b(Lint); 3501 #endif // AARCH64 3502 3503 // table with addresses for slow version 3504 if (fast_version) { 3505 // nothing to do 3506 } else { 3507 AARCH64_ONLY(__ align(wordSize)); 3508 __ bind(Ltable); 3509 __ emit_address(Lbtos); 3510 __ emit_address(Lztos); 3511 __ emit_address(Lctos); 3512 __ emit_address(Lstos); 3513 __ emit_address(Litos); 3514 __ emit_address(Lltos); 3515 __ emit_address(Lftos); 3516 __ emit_address(Ldtos); 3517 __ emit_address(Latos); 3518 } 3519 3520 #ifdef ASSERT 3521 int seq = 0; 3522 #endif 3523 // btos 3524 { 3525 assert(btos == seq++, "btos has unexpected value"); 3526 FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version); 3527 __ bind(Lbtos); 3528 __ pop(btos); 3529 if (!is_static) pop_and_check_object(Robj); 3530 __ strb(R0_tos, Address(Robj, Roffset)); 3531 if (!is_static && rc == may_rewrite) { 3532 patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no); 3533 } 3534 __ b(Done); 3535 } 3536 3537 // ztos 3538 { 3539 assert(ztos == seq++, "ztos has unexpected value"); 3540 FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version); 3541 __ bind(Lztos); 3542 __ pop(ztos); 3543 if (!is_static) pop_and_check_object(Robj); 3544 __ and_32(R0_tos, R0_tos, 1); 3545 __ strb(R0_tos, Address(Robj, Roffset)); 3546 if (!is_static && rc == may_rewrite) { 3547 patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no); 3548 } 3549 __ b(Done); 3550 } 3551 3552 // ctos 3553 { 3554 assert(ctos == seq++, "ctos has unexpected value"); 3555 FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version); 3556 __ bind(Lctos); 3557 __ pop(ctos); 3558 if (!is_static) pop_and_check_object(Robj); 3559 __ strh(R0_tos, Address(Robj, Roffset)); 3560 if (!is_static && rc == may_rewrite) { 3561 patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no); 3562 } 3563 __ b(Done); 3564 } 3565 3566 // stos 3567 { 3568 assert(stos == seq++, "stos has unexpected value"); 3569 FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version); 3570 __ bind(Lstos); 3571 __ pop(stos); 3572 if (!is_static) pop_and_check_object(Robj); 3573 __ strh(R0_tos, Address(Robj, Roffset)); 3574 if (!is_static && rc == may_rewrite) { 3575 patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no); 3576 } 3577 __ b(Done); 3578 } 3579 3580 // itos 3581 { 3582 assert(itos == seq++, "itos has unexpected value"); 3583 FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version); 3584 __ bind(Litos); 3585 __ b(shouldNotReachHere); 3586 } 3587 3588 // ltos 3589 { 3590 assert(ltos == seq++, "ltos has unexpected value"); 3591 FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version); 3592 __ bind(Lltos); 3593 __ pop(ltos); 3594 if (!is_static) pop_and_check_object(Robj); 3595 #ifdef AARCH64 3596 __ str(R0_tos, Address(Robj, Roffset)); 3597 #else 3598 __ add(Roffset, Robj, Roffset); 3599 __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); 3600 #endif // AARCH64 3601 if (!is_static && rc == may_rewrite) { 3602 patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no); 3603 } 3604 __ b(Done); 3605 } 3606 3607 // ftos 3608 { 3609 assert(ftos == seq++, "ftos has unexpected value"); 3610 FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version); 3611 __ bind(Lftos); 3612 // floats and ints are placed on stack in the same way, so 3613 // we can use pop(itos) to transfer value without using VFP 3614 __ pop(itos); 3615 if (!is_static) pop_and_check_object(Robj); 3616 __ str_32(R0_tos, Address(Robj, Roffset)); 3617 if (!is_static && rc == may_rewrite) { 3618 patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no); 3619 } 3620 __ b(Done); 3621 } 3622 3623 // dtos 3624 { 3625 assert(dtos == seq++, "dtos has unexpected value"); 3626 FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version); 3627 __ bind(Ldtos); 3628 // doubles and longs are placed on stack in the same way, so 3629 // we can use pop(ltos) to transfer value without using VFP 3630 __ pop(ltos); 3631 if (!is_static) pop_and_check_object(Robj); 3632 #ifdef AARCH64 3633 __ str(R0_tos, Address(Robj, Roffset)); 3634 #else 3635 __ add(Rtemp, Robj, Roffset); 3636 __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi)); 3637 #endif // AARCH64 3638 if (!is_static && rc == may_rewrite) { 3639 patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no); 3640 } 3641 __ b(Done); 3642 } 3643 3644 // atos 3645 { 3646 assert(atos == seq++, "dtos has unexpected value"); 3647 __ bind(Latos); 3648 __ pop(atos); 3649 if (!is_static) pop_and_check_object(Robj); 3650 // Store into the field 3651 do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, _bs->kind(), false, false); 3652 if (!is_static && rc == may_rewrite) { 3653 patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no); 3654 } 3655 __ b(Done); 3656 } 3657 3658 __ bind(shouldNotReachHere); 3659 __ should_not_reach_here(); 3660 3661 // itos case is frequent and is moved outside table switch 3662 __ bind(Lint); 3663 __ pop(itos); 3664 if (!is_static) pop_and_check_object(Robj); 3665 __ str_32(R0_tos, Address(Robj, Roffset)); 3666 if (!is_static && rc == may_rewrite) { 3667 patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no); 3668 } 3669 3670 __ bind(Done); 3671 3672 if (gen_volatile_check) { 3673 Label notVolatile; 3674 if (is_static) { 3675 // Just check for volatile. Memory barrier for static final field 3676 // is handled by class initialization. 3677 __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3678 volatile_barrier(MacroAssembler::StoreLoad, Rtemp); 3679 __ bind(notVolatile); 3680 } else { 3681 // Check for volatile field and final field 3682 Label skipMembar; 3683 3684 __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift | 3685 1 << ConstantPoolCacheEntry::is_final_shift); 3686 __ b(skipMembar, eq); 3687 3688 __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3689 3690 // StoreLoad barrier after volatile field write 3691 volatile_barrier(MacroAssembler::StoreLoad, Rtemp); 3692 __ b(skipMembar); 3693 3694 // StoreStore barrier after final field write 3695 __ bind(notVolatile); 3696 volatile_barrier(MacroAssembler::StoreStore, Rtemp); 3697 3698 __ bind(skipMembar); 3699 } 3700 } 3701 3702 } 3703 3704 void TemplateTable::putfield(int byte_no) { 3705 putfield_or_static(byte_no, false); 3706 } 3707 3708 void TemplateTable::nofast_putfield(int byte_no) { 3709 putfield_or_static(byte_no, false, may_not_rewrite); 3710 } 3711 3712 void TemplateTable::putstatic(int byte_no) { 3713 putfield_or_static(byte_no, true); 3714 } 3715 3716 3717 void TemplateTable::jvmti_post_fast_field_mod() { 3718 // This version of jvmti_post_fast_field_mod() is not used on ARM 3719 Unimplemented(); 3720 } 3721 3722 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR, 3723 // but preserves tosca with the given state. 3724 void TemplateTable::jvmti_post_fast_field_mod(TosState state) { 3725 if (__ can_post_field_modification()) { 3726 // Check to see if a field modification watch has been set before we take 3727 // the time to call into the VM. 3728 Label done; 3729 3730 __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr()); 3731 __ cbz(R2, done); 3732 3733 __ pop_ptr(R3); // copy the object pointer from tos 3734 __ verify_oop(R3); 3735 __ push_ptr(R3); // put the object pointer back on tos 3736 3737 __ push(state); // save value on the stack 3738 3739 // access constant pool cache entry 3740 __ get_cache_entry_pointer_at_bcp(R2, R1, 1); 3741 3742 __ mov(R1, R3); 3743 assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code"); 3744 __ mov(R3, Rstack_top); // put tos addr into R3 3745 3746 // R1: object pointer copied above 3747 // R2: cache entry pointer 3748 // R3: jvalue object on the stack 3749 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3); 3750 3751 __ pop(state); // restore value 3752 3753 __ bind(done); 3754 } 3755 } 3756 3757 3758 void TemplateTable::fast_storefield(TosState state) { 3759 transition(state, vtos); 3760 3761 ByteSize base = ConstantPoolCache::base_offset(); 3762 3763 jvmti_post_fast_field_mod(state); 3764 3765 const Register Rcache = R2_tmp; 3766 const Register Rindex = R3_tmp; 3767 const Register Roffset = R3_tmp; 3768 const Register Rflags = Rtmp_save0; // R4/R19 3769 const Register Robj = R5_tmp; 3770 3771 const bool gen_volatile_check = os::is_MP(); 3772 3773 // access constant pool cache 3774 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1); 3775 3776 __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3777 3778 if (gen_volatile_check) { 3779 // load flags to test volatile 3780 __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset())); 3781 } 3782 3783 // replace index with field offset from cache entry 3784 __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset())); 3785 3786 if (gen_volatile_check) { 3787 // Check for volatile store 3788 Label notVolatile; 3789 __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3790 3791 // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explict barrier 3792 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp); 3793 3794 __ bind(notVolatile); 3795 } 3796 3797 // Get object from stack 3798 pop_and_check_object(Robj); 3799 3800 // access field 3801 switch (bytecode()) { 3802 case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1); 3803 // fall through 3804 case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break; 3805 case Bytecodes::_fast_sputfield: // fall through 3806 case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break; 3807 case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break; 3808 #ifdef AARCH64 3809 case Bytecodes::_fast_lputfield: __ str (R0_tos, Address(Robj, Roffset)); break; 3810 case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break; 3811 case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break; 3812 #else 3813 case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset); 3814 __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break; 3815 3816 #ifdef __SOFTFP__ 3817 case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset)); break; 3818 case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset); 3819 __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break; 3820 #else 3821 case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset); 3822 __ fsts(S0_tos, Address(Robj)); break; 3823 case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset); 3824 __ fstd(D0_tos, Address(Robj)); break; 3825 #endif // __SOFTFP__ 3826 #endif // AARCH64 3827 3828 case Bytecodes::_fast_aputfield: 3829 do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, _bs->kind(), false, false); 3830 break; 3831 3832 default: 3833 ShouldNotReachHere(); 3834 } 3835 3836 if (gen_volatile_check) { 3837 Label notVolatile; 3838 Label skipMembar; 3839 __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift | 3840 1 << ConstantPoolCacheEntry::is_final_shift); 3841 __ b(skipMembar, eq); 3842 3843 __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3844 3845 // StoreLoad barrier after volatile field write 3846 volatile_barrier(MacroAssembler::StoreLoad, Rtemp); 3847 __ b(skipMembar); 3848 3849 // StoreStore barrier after final field write 3850 __ bind(notVolatile); 3851 volatile_barrier(MacroAssembler::StoreStore, Rtemp); 3852 3853 __ bind(skipMembar); 3854 } 3855 } 3856 3857 3858 void TemplateTable::fast_accessfield(TosState state) { 3859 transition(atos, state); 3860 3861 // do the JVMTI work here to avoid disturbing the register state below 3862 if (__ can_post_field_access()) { 3863 // Check to see if a field access watch has been set before we take 3864 // the time to call into the VM. 3865 Label done; 3866 __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr()); 3867 __ cbz(R2, done); 3868 // access constant pool cache entry 3869 __ get_cache_entry_pointer_at_bcp(R2, R1, 1); 3870 __ push_ptr(R0_tos); // save object pointer before call_VM() clobbers it 3871 __ verify_oop(R0_tos); 3872 __ mov(R1, R0_tos); 3873 // R1: object pointer copied above 3874 // R2: cache entry pointer 3875 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2); 3876 __ pop_ptr(R0_tos); // restore object pointer 3877 3878 __ bind(done); 3879 } 3880 3881 const Register Robj = R0_tos; 3882 const Register Rcache = R2_tmp; 3883 const Register Rflags = R2_tmp; 3884 const Register Rindex = R3_tmp; 3885 const Register Roffset = R3_tmp; 3886 3887 const bool gen_volatile_check = os::is_MP(); 3888 3889 // access constant pool cache 3890 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1); 3891 // replace index with field offset from cache entry 3892 __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3893 __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())); 3894 3895 if (gen_volatile_check) { 3896 // load flags to test volatile 3897 __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); 3898 } 3899 3900 __ verify_oop(Robj); 3901 __ null_check(Robj, Rtemp); 3902 3903 // access field 3904 switch (bytecode()) { 3905 case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break; 3906 case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break; 3907 case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break; 3908 case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break; 3909 #ifdef AARCH64 3910 case Bytecodes::_fast_lgetfield: __ ldr (R0_tos, Address(Robj, Roffset)); break; 3911 case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break; 3912 case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break; 3913 #else 3914 case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset); 3915 __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break; 3916 #ifdef __SOFTFP__ 3917 case Bytecodes::_fast_fgetfield: __ ldr (R0_tos, Address(Robj, Roffset)); break; 3918 case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); 3919 __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break; 3920 #else 3921 case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break; 3922 case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break; 3923 #endif // __SOFTFP__ 3924 #endif // AARCH64 3925 case Bytecodes::_fast_agetfield: __ load_heap_oop(R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break; 3926 default: 3927 ShouldNotReachHere(); 3928 } 3929 3930 if (gen_volatile_check) { 3931 // Check for volatile load 3932 Label notVolatile; 3933 __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3934 3935 // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explict barrier 3936 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); 3937 3938 __ bind(notVolatile); 3939 } 3940 } 3941 3942 3943 void TemplateTable::fast_xaccess(TosState state) { 3944 transition(vtos, state); 3945 3946 const Register Robj = R1_tmp; 3947 const Register Rcache = R2_tmp; 3948 const Register Rindex = R3_tmp; 3949 const Register Roffset = R3_tmp; 3950 const Register Rflags = R4_tmp; 3951 Label done; 3952 3953 // get receiver 3954 __ ldr(Robj, aaddress(0)); 3955 3956 // access constant pool cache 3957 __ get_cache_and_index_at_bcp(Rcache, Rindex, 2); 3958 __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 3959 __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())); 3960 3961 const bool gen_volatile_check = os::is_MP(); 3962 3963 if (gen_volatile_check) { 3964 // load flags to test volatile 3965 __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); 3966 } 3967 3968 // make sure exception is reported in correct bcp range (getfield is next instruction) 3969 __ add(Rbcp, Rbcp, 1); 3970 __ null_check(Robj, Rtemp); 3971 __ sub(Rbcp, Rbcp, 1); 3972 3973 #ifdef AARCH64 3974 if (gen_volatile_check) { 3975 Label notVolatile; 3976 __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 3977 3978 __ add(Rtemp, Robj, Roffset); 3979 3980 if (state == itos) { 3981 __ ldar_w(R0_tos, Rtemp); 3982 } else if (state == atos) { 3983 if (UseCompressedOops) { 3984 __ ldar_w(R0_tos, Rtemp); 3985 __ decode_heap_oop(R0_tos); 3986 } else { 3987 __ ldar(R0_tos, Rtemp); 3988 } 3989 __ verify_oop(R0_tos); 3990 } else if (state == ftos) { 3991 __ ldar_w(R0_tos, Rtemp); 3992 __ fmov_sw(S0_tos, R0_tos); 3993 } else { 3994 ShouldNotReachHere(); 3995 } 3996 __ b(done); 3997 3998 __ bind(notVolatile); 3999 } 4000 #endif // AARCH64 4001 4002 if (state == itos) { 4003 __ ldr_s32(R0_tos, Address(Robj, Roffset)); 4004 } else if (state == atos) { 4005 __ load_heap_oop(R0_tos, Address(Robj, Roffset)); 4006 __ verify_oop(R0_tos); 4007 } else if (state == ftos) { 4008 #ifdef AARCH64 4009 __ ldr_s(S0_tos, Address(Robj, Roffset)); 4010 #else 4011 #ifdef __SOFTFP__ 4012 __ ldr(R0_tos, Address(Robj, Roffset)); 4013 #else 4014 __ add(Roffset, Robj, Roffset); 4015 __ flds(S0_tos, Address(Roffset)); 4016 #endif // __SOFTFP__ 4017 #endif // AARCH64 4018 } else { 4019 ShouldNotReachHere(); 4020 } 4021 4022 #ifndef AARCH64 4023 if (gen_volatile_check) { 4024 // Check for volatile load 4025 Label notVolatile; 4026 __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); 4027 4028 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); 4029 4030 __ bind(notVolatile); 4031 } 4032 #endif // !AARCH64 4033 4034 __ bind(done); 4035 } 4036 4037 4038 4039 //---------------------------------------------------------------------------------------------------- 4040 // Calls 4041 4042 void TemplateTable::count_calls(Register method, Register temp) { 4043 // implemented elsewhere 4044 ShouldNotReachHere(); 4045 } 4046 4047 4048 void TemplateTable::prepare_invoke(int byte_no, 4049 Register method, // linked method (or i-klass) 4050 Register index, // itable index, MethodType, etc. 4051 Register recv, // if caller wants to see it 4052 Register flags // if caller wants to test it 4053 ) { 4054 // determine flags 4055 const Bytecodes::Code code = bytecode(); 4056 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 4057 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 4058 const bool is_invokehandle = code == Bytecodes::_invokehandle; 4059 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 4060 const bool is_invokespecial = code == Bytecodes::_invokespecial; 4061 const bool load_receiver = (recv != noreg); 4062 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 4063 assert(recv == noreg || recv == R2, ""); 4064 assert(flags == noreg || flags == R3, ""); 4065 4066 // setup registers & access constant pool cache 4067 if (recv == noreg) recv = R2; 4068 if (flags == noreg) flags = R3; 4069 const Register temp = Rtemp; 4070 const Register ret_type = R1_tmp; 4071 assert_different_registers(method, index, flags, recv, LR, ret_type, temp); 4072 4073 // save 'interpreter return address' 4074 __ save_bcp(); 4075 4076 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); 4077 4078 // maybe push extra argument 4079 if (is_invokedynamic || is_invokehandle) { 4080 Label L_no_push; 4081 __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push); 4082 __ mov(temp, index); 4083 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0"); 4084 __ load_resolved_reference_at_index(index, temp); 4085 __ verify_oop(index); 4086 __ push_ptr(index); // push appendix (MethodType, CallSite, etc.) 4087 __ bind(L_no_push); 4088 } 4089 4090 // load receiver if needed (after extra argument is pushed so parameter size is correct) 4091 if (load_receiver) { 4092 __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask); // get parameter size 4093 Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv); 4094 __ ldr(recv, recv_addr); 4095 __ verify_oop(recv); 4096 } 4097 4098 // compute return type 4099 __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift); 4100 // Make sure we don't need to mask flags after the above shift 4101 ConstantPoolCacheEntry::verify_tos_state_shift(); 4102 // load return address 4103 { const address table = (address) Interpreter::invoke_return_entry_table_for(code); 4104 __ mov_slow(temp, table); 4105 __ ldr(LR, Address::indexed_ptr(temp, ret_type)); 4106 } 4107 } 4108 4109 4110 void TemplateTable::invokevirtual_helper(Register index, 4111 Register recv, 4112 Register flags) { 4113 4114 const Register recv_klass = R2_tmp; 4115 4116 assert_different_registers(index, recv, flags, Rtemp); 4117 assert_different_registers(index, recv_klass, R0_tmp, Rtemp); 4118 4119 // Test for an invoke of a final method 4120 Label notFinal; 4121 __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal); 4122 4123 assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention"); 4124 4125 // do the call - the index is actually the method to call 4126 4127 // It's final, need a null check here! 4128 __ null_check(recv, Rtemp); 4129 4130 // profile this call 4131 __ profile_final_call(R0_tmp); 4132 4133 __ jump_from_interpreted(Rmethod); 4134 4135 __ bind(notFinal); 4136 4137 // get receiver klass 4138 __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes()); 4139 __ load_klass(recv_klass, recv); 4140 4141 // profile this call 4142 __ profile_virtual_call(R0_tmp, recv_klass); 4143 4144 // get target Method* & entry point 4145 const int base = in_bytes(Klass::vtable_start_offset()); 4146 assert(vtableEntry::size() == 1, "adjust the scaling in the code below"); 4147 __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize)); 4148 __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes())); 4149 __ jump_from_interpreted(Rmethod); 4150 } 4151 4152 void TemplateTable::invokevirtual(int byte_no) { 4153 transition(vtos, vtos); 4154 assert(byte_no == f2_byte, "use this argument"); 4155 4156 const Register Rrecv = R2_tmp; 4157 const Register Rflags = R3_tmp; 4158 4159 prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags); 4160 4161 // Rmethod: index 4162 // Rrecv: receiver 4163 // Rflags: flags 4164 // LR: return address 4165 4166 invokevirtual_helper(Rmethod, Rrecv, Rflags); 4167 } 4168 4169 4170 void TemplateTable::invokespecial(int byte_no) { 4171 transition(vtos, vtos); 4172 assert(byte_no == f1_byte, "use this argument"); 4173 const Register Rrecv = R2_tmp; 4174 prepare_invoke(byte_no, Rmethod, noreg, Rrecv); 4175 __ verify_oop(Rrecv); 4176 __ null_check(Rrecv, Rtemp); 4177 // do the call 4178 __ profile_call(Rrecv); 4179 __ jump_from_interpreted(Rmethod); 4180 } 4181 4182 4183 void TemplateTable::invokestatic(int byte_no) { 4184 transition(vtos, vtos); 4185 assert(byte_no == f1_byte, "use this argument"); 4186 prepare_invoke(byte_no, Rmethod); 4187 // do the call 4188 __ profile_call(R2_tmp); 4189 __ jump_from_interpreted(Rmethod); 4190 } 4191 4192 4193 void TemplateTable::fast_invokevfinal(int byte_no) { 4194 transition(vtos, vtos); 4195 assert(byte_no == f2_byte, "use this argument"); 4196 __ stop("fast_invokevfinal is not used on ARM"); 4197 } 4198 4199 4200 void TemplateTable::invokeinterface(int byte_no) { 4201 transition(vtos, vtos); 4202 assert(byte_no == f1_byte, "use this argument"); 4203 4204 const Register Ritable = R1_tmp; 4205 const Register Rrecv = R2_tmp; 4206 const Register Rinterf = R5_tmp; 4207 const Register Rindex = R4_tmp; 4208 const Register Rflags = R3_tmp; 4209 const Register Rklass = R3_tmp; 4210 4211 prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags); 4212 4213 // Special case of invokeinterface called for virtual method of 4214 // java.lang.Object. See cpCacheOop.cpp for details. 4215 // This code isn't produced by javac, but could be produced by 4216 // another compliant java compiler. 4217 Label notMethod; 4218 __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod); 4219 4220 invokevirtual_helper(Rmethod, Rrecv, Rflags); 4221 __ bind(notMethod); 4222 4223 // Get receiver klass into Rklass - also a null check 4224 __ load_klass(Rklass, Rrecv); 4225 4226 Label no_such_interface; 4227 4228 // Receiver subtype check against REFC. 4229 __ lookup_interface_method(// inputs: rec. class, interface 4230 Rklass, Rinterf, noreg, 4231 // outputs: scan temp. reg1, scan temp. reg2 4232 noreg, Ritable, Rtemp, 4233 no_such_interface); 4234 4235 // profile this call 4236 __ profile_virtual_call(R0_tmp, Rklass); 4237 4238 // Get declaring interface class from method 4239 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); 4240 __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset())); 4241 __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes())); 4242 4243 // Get itable index from method 4244 __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset())); 4245 __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32 4246 __ neg(Rindex, Rtemp); 4247 4248 __ lookup_interface_method(// inputs: rec. class, interface 4249 Rklass, Rinterf, Rindex, 4250 // outputs: scan temp. reg1, scan temp. reg2 4251 Rmethod, Ritable, Rtemp, 4252 no_such_interface); 4253 4254 // Rmethod: Method* to call 4255 4256 // Check for abstract method error 4257 // Note: This should be done more efficiently via a throw_abstract_method_error 4258 // interpreter entry point and a conditional jump to it in case of a null 4259 // method. 4260 { Label L; 4261 __ cbnz(Rmethod, L); 4262 // throw exception 4263 // note: must restore interpreter registers to canonical 4264 // state for exception handling to work correctly! 4265 __ restore_method(); 4266 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); 4267 // the call_VM checks for exception, so we should never return here. 4268 __ should_not_reach_here(); 4269 __ bind(L); 4270 } 4271 4272 // do the call 4273 __ jump_from_interpreted(Rmethod); 4274 4275 // throw exception 4276 __ bind(no_such_interface); 4277 __ restore_method(); 4278 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError)); 4279 // the call_VM checks for exception, so we should never return here. 4280 __ should_not_reach_here(); 4281 } 4282 4283 void TemplateTable::invokehandle(int byte_no) { 4284 transition(vtos, vtos); 4285 4286 // TODO-AARCH64 review register usage 4287 const Register Rrecv = R2_tmp; 4288 const Register Rmtype = R4_tmp; 4289 const Register R5_method = R5_tmp; // can't reuse Rmethod! 4290 4291 prepare_invoke(byte_no, R5_method, Rmtype, Rrecv); 4292 __ null_check(Rrecv, Rtemp); 4293 4294 // Rmtype: MethodType object (from cpool->resolved_references[f1], if necessary) 4295 // Rmethod: MH.invokeExact_MT method (from f2) 4296 4297 // Note: Rmtype is already pushed (if necessary) by prepare_invoke 4298 4299 // do the call 4300 __ profile_final_call(R3_tmp); // FIXME: profile the LambdaForm also 4301 __ mov(Rmethod, R5_method); 4302 __ jump_from_interpreted(Rmethod); 4303 } 4304 4305 void TemplateTable::invokedynamic(int byte_no) { 4306 transition(vtos, vtos); 4307 4308 // TODO-AARCH64 review register usage 4309 const Register Rcallsite = R4_tmp; 4310 const Register R5_method = R5_tmp; // can't reuse Rmethod! 4311 4312 prepare_invoke(byte_no, R5_method, Rcallsite); 4313 4314 // Rcallsite: CallSite object (from cpool->resolved_references[f1]) 4315 // Rmethod: MH.linkToCallSite method (from f2) 4316 4317 // Note: Rcallsite is already pushed by prepare_invoke 4318 4319 if (ProfileInterpreter) { 4320 __ profile_call(R2_tmp); 4321 } 4322 4323 // do the call 4324 __ mov(Rmethod, R5_method); 4325 __ jump_from_interpreted(Rmethod); 4326 } 4327 4328 //---------------------------------------------------------------------------------------------------- 4329 // Allocation 4330 4331 void TemplateTable::_new() { 4332 transition(vtos, atos); 4333 4334 const Register Robj = R0_tos; 4335 const Register Rcpool = R1_tmp; 4336 const Register Rindex = R2_tmp; 4337 const Register Rtags = R3_tmp; 4338 const Register Rsize = R3_tmp; 4339 4340 Register Rklass = R4_tmp; 4341 assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp); 4342 assert_different_registers(Rcpool, Rindex, Rklass, Rsize); 4343 4344 Label slow_case; 4345 Label done; 4346 Label initialize_header; 4347 Label initialize_object; // including clearing the fields 4348 4349 const bool allow_shared_alloc = 4350 Universe::heap()->supports_inline_contig_alloc(); 4351 4352 // Literals 4353 InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL); 4354 4355 __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); 4356 __ get_cpool_and_tags(Rcpool, Rtags); 4357 4358 // Make sure the class we're about to instantiate has been resolved. 4359 // This is done before loading InstanceKlass to be consistent with the order 4360 // how Constant Pool is updated (see ConstantPool::klass_at_put) 4361 const int tags_offset = Array<u1>::base_offset_in_bytes(); 4362 __ add(Rtemp, Rtags, Rindex); 4363 4364 #ifdef AARCH64 4365 __ add(Rtemp, Rtemp, tags_offset); 4366 __ ldarb(Rtemp, Rtemp); 4367 #else 4368 __ ldrb(Rtemp, Address(Rtemp, tags_offset)); 4369 4370 // use Rklass as a scratch 4371 volatile_barrier(MacroAssembler::LoadLoad, Rklass); 4372 #endif // AARCH64 4373 4374 // get InstanceKlass 4375 __ cmp(Rtemp, JVM_CONSTANT_Class); 4376 __ b(slow_case, ne); 4377 __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass); 4378 4379 // make sure klass is initialized & doesn't have finalizer 4380 // make sure klass is fully initialized 4381 __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset())); 4382 __ cmp(Rtemp, InstanceKlass::fully_initialized); 4383 __ b(slow_case, ne); 4384 4385 // get instance_size in InstanceKlass (scaled to a count of bytes) 4386 __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset())); 4387 4388 // test to see if it has a finalizer or is malformed in some way 4389 // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number 4390 __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case); 4391 4392 // Allocate the instance: 4393 // If TLAB is enabled: 4394 // Try to allocate in the TLAB. 4395 // If fails, go to the slow path. 4396 // Else If inline contiguous allocations are enabled: 4397 // Try to allocate in eden. 4398 // If fails due to heap end, go to slow path. 4399 // 4400 // If TLAB is enabled OR inline contiguous is enabled: 4401 // Initialize the allocation. 4402 // Exit. 4403 // 4404 // Go to slow path. 4405 if (UseTLAB) { 4406 const Register Rtlab_top = R1_tmp; 4407 const Register Rtlab_end = R2_tmp; 4408 assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end); 4409 4410 __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset())); 4411 __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset()))); 4412 __ add(Rtlab_top, Robj, Rsize); 4413 __ cmp(Rtlab_top, Rtlab_end); 4414 __ b(slow_case, hi); 4415 __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset())); 4416 if (ZeroTLAB) { 4417 // the fields have been already cleared 4418 __ b(initialize_header); 4419 } else { 4420 // initialize both the header and fields 4421 __ b(initialize_object); 4422 } 4423 } else { 4424 // Allocation in the shared Eden, if allowed. 4425 if (allow_shared_alloc) { 4426 const Register Rheap_top_addr = R2_tmp; 4427 const Register Rheap_top = R5_tmp; 4428 const Register Rheap_end = Rtemp; 4429 assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR); 4430 4431 // heap_end now (re)loaded in the loop since also used as a scratch register in the CAS 4432 __ ldr_literal(Rheap_top_addr, Lheap_top_addr); 4433 4434 Label retry; 4435 __ bind(retry); 4436 4437 #ifdef AARCH64 4438 __ ldxr(Robj, Rheap_top_addr); 4439 #else 4440 __ ldr(Robj, Address(Rheap_top_addr)); 4441 #endif // AARCH64 4442 4443 __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr())); 4444 __ add(Rheap_top, Robj, Rsize); 4445 __ cmp(Rheap_top, Rheap_end); 4446 __ b(slow_case, hi); 4447 4448 // Update heap top atomically. 4449 // If someone beats us on the allocation, try again, otherwise continue. 4450 #ifdef AARCH64 4451 __ stxr(Rtemp2, Rheap_top, Rheap_top_addr); 4452 __ cbnz_w(Rtemp2, retry); 4453 #else 4454 __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/); 4455 __ b(retry, ne); 4456 #endif // AARCH64 4457 4458 __ incr_allocated_bytes(Rsize, Rtemp); 4459 } 4460 } 4461 4462 if (UseTLAB || allow_shared_alloc) { 4463 const Register Rzero0 = R1_tmp; 4464 const Register Rzero1 = R2_tmp; 4465 const Register Rzero_end = R5_tmp; 4466 const Register Rzero_cur = Rtemp; 4467 assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end); 4468 4469 // The object is initialized before the header. If the object size is 4470 // zero, go directly to the header initialization. 4471 __ bind(initialize_object); 4472 __ subs(Rsize, Rsize, sizeof(oopDesc)); 4473 __ add(Rzero_cur, Robj, sizeof(oopDesc)); 4474 __ b(initialize_header, eq); 4475 4476 #ifdef ASSERT 4477 // make sure Rsize is a multiple of 8 4478 Label L; 4479 __ tst(Rsize, 0x07); 4480 __ b(L, eq); 4481 __ stop("object size is not multiple of 8 - adjust this code"); 4482 __ bind(L); 4483 #endif 4484 4485 #ifdef AARCH64 4486 { 4487 Label loop; 4488 // Step back by 1 word if object size is not a multiple of 2*wordSize. 4489 assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word"); 4490 __ andr(Rtemp2, Rsize, (uintx)wordSize); 4491 __ sub(Rzero_cur, Rzero_cur, Rtemp2); 4492 4493 // Zero by 2 words per iteration. 4494 __ bind(loop); 4495 __ subs(Rsize, Rsize, 2*wordSize); 4496 __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed)); 4497 __ b(loop, gt); 4498 } 4499 #else 4500 __ mov(Rzero0, 0); 4501 __ mov(Rzero1, 0); 4502 __ add(Rzero_end, Rzero_cur, Rsize); 4503 4504 // initialize remaining object fields: Rsize was a multiple of 8 4505 { Label loop; 4506 // loop is unrolled 2 times 4507 __ bind(loop); 4508 // #1 4509 __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback); 4510 __ cmp(Rzero_cur, Rzero_end); 4511 // #2 4512 __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne); 4513 __ cmp(Rzero_cur, Rzero_end, ne); 4514 __ b(loop, ne); 4515 } 4516 #endif // AARCH64 4517 4518 // initialize object header only. 4519 __ bind(initialize_header); 4520 if (UseBiasedLocking) { 4521 __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset())); 4522 } else { 4523 __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype()); 4524 } 4525 // mark 4526 __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes())); 4527 4528 // klass 4529 #ifdef AARCH64 4530 __ store_klass_gap(Robj); 4531 #endif // AARCH64 4532 __ store_klass(Rklass, Robj); // blows Rklass: 4533 Rklass = noreg; 4534 4535 // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation 4536 if (DTraceAllocProbes) { 4537 // Trigger dtrace event for fastpath 4538 Label Lcontinue; 4539 4540 __ ldrb_global(Rtemp, (address)&DTraceAllocProbes); 4541 __ cbz(Rtemp, Lcontinue); 4542 4543 __ push(atos); 4544 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj); 4545 __ pop(atos); 4546 4547 __ bind(Lcontinue); 4548 } 4549 4550 __ b(done); 4551 } else { 4552 // jump over literals 4553 __ b(slow_case); 4554 } 4555 4556 if (allow_shared_alloc) { 4557 __ bind_literal(Lheap_top_addr); 4558 } 4559 4560 // slow case 4561 __ bind(slow_case); 4562 __ get_constant_pool(Rcpool); 4563 __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); 4564 __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex); 4565 4566 // continue 4567 __ bind(done); 4568 4569 // StoreStore barrier required after complete initialization 4570 // (headers + content zeroing), before the object may escape. 4571 __ membar(MacroAssembler::StoreStore, R1_tmp); 4572 } 4573 4574 4575 void TemplateTable::newarray() { 4576 transition(itos, atos); 4577 __ ldrb(R1, at_bcp(1)); 4578 __ mov(R2, R0_tos); 4579 call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2); 4580 // MacroAssembler::StoreStore useless (included in the runtime exit path) 4581 } 4582 4583 4584 void TemplateTable::anewarray() { 4585 transition(itos, atos); 4586 __ get_unsigned_2_byte_index_at_bcp(R2, 1); 4587 __ get_constant_pool(R1); 4588 __ mov(R3, R0_tos); 4589 call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3); 4590 // MacroAssembler::StoreStore useless (included in the runtime exit path) 4591 } 4592 4593 4594 void TemplateTable::arraylength() { 4595 transition(atos, itos); 4596 __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes()); 4597 __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes())); 4598 } 4599 4600 4601 void TemplateTable::checkcast() { 4602 transition(atos, atos); 4603 Label done, is_null, quicked, resolved, throw_exception; 4604 4605 const Register Robj = R0_tos; 4606 const Register Rcpool = R2_tmp; 4607 const Register Rtags = R3_tmp; 4608 const Register Rindex = R4_tmp; 4609 const Register Rsuper = R3_tmp; 4610 const Register Rsub = R4_tmp; 4611 const Register Rsubtype_check_tmp1 = R1_tmp; 4612 const Register Rsubtype_check_tmp2 = LR_tmp; 4613 4614 __ cbz(Robj, is_null); 4615 4616 // Get cpool & tags index 4617 __ get_cpool_and_tags(Rcpool, Rtags); 4618 __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); 4619 4620 // See if bytecode has already been quicked 4621 __ add(Rtemp, Rtags, Rindex); 4622 #ifdef AARCH64 4623 // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough 4624 __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes()); 4625 __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier 4626 #else 4627 __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes())); 4628 #endif // AARCH64 4629 4630 __ cmp(Rtemp, JVM_CONSTANT_Class); 4631 4632 #ifndef AARCH64 4633 volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true); 4634 #endif // !AARCH64 4635 4636 __ b(quicked, eq); 4637 4638 __ push(atos); 4639 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 4640 // vm_result_2 has metadata result 4641 __ get_vm_result_2(Rsuper, Robj); 4642 __ pop_ptr(Robj); 4643 __ b(resolved); 4644 4645 __ bind(throw_exception); 4646 // Come here on failure of subtype check 4647 __ profile_typecheck_failed(R1_tmp); 4648 __ mov(R2_ClassCastException_obj, Robj); // convention with generate_ClassCastException_handler() 4649 __ b(Interpreter::_throw_ClassCastException_entry); 4650 4651 // Get superklass in Rsuper and subklass in Rsub 4652 __ bind(quicked); 4653 __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper); 4654 4655 __ bind(resolved); 4656 __ load_klass(Rsub, Robj); 4657 4658 // Generate subtype check. Blows both tmps and Rtemp. 4659 assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp); 4660 __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2); 4661 4662 // Come here on success 4663 4664 // Collect counts on whether this check-cast sees NULLs a lot or not. 4665 if (ProfileInterpreter) { 4666 __ b(done); 4667 __ bind(is_null); 4668 __ profile_null_seen(R1_tmp); 4669 } else { 4670 __ bind(is_null); // same as 'done' 4671 } 4672 __ bind(done); 4673 } 4674 4675 4676 void TemplateTable::instanceof() { 4677 // result = 0: obj == NULL or obj is not an instanceof the specified klass 4678 // result = 1: obj != NULL and obj is an instanceof the specified klass 4679 4680 transition(atos, itos); 4681 Label done, is_null, not_subtype, quicked, resolved; 4682 4683 const Register Robj = R0_tos; 4684 const Register Rcpool = R2_tmp; 4685 const Register Rtags = R3_tmp; 4686 const Register Rindex = R4_tmp; 4687 const Register Rsuper = R3_tmp; 4688 const Register Rsub = R4_tmp; 4689 const Register Rsubtype_check_tmp1 = R0_tmp; 4690 const Register Rsubtype_check_tmp2 = R1_tmp; 4691 4692 __ cbz(Robj, is_null); 4693 4694 __ load_klass(Rsub, Robj); 4695 4696 // Get cpool & tags index 4697 __ get_cpool_and_tags(Rcpool, Rtags); 4698 __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); 4699 4700 // See if bytecode has already been quicked 4701 __ add(Rtemp, Rtags, Rindex); 4702 #ifdef AARCH64 4703 // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough 4704 __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes()); 4705 __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier 4706 #else 4707 __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes())); 4708 #endif // AARCH64 4709 __ cmp(Rtemp, JVM_CONSTANT_Class); 4710 4711 #ifndef AARCH64 4712 volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true); 4713 #endif // !AARCH64 4714 4715 __ b(quicked, eq); 4716 4717 __ push(atos); 4718 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 4719 // vm_result_2 has metadata result 4720 __ get_vm_result_2(Rsuper, Robj); 4721 __ pop_ptr(Robj); 4722 __ b(resolved); 4723 4724 // Get superklass in Rsuper and subklass in Rsub 4725 __ bind(quicked); 4726 __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper); 4727 4728 __ bind(resolved); 4729 __ load_klass(Rsub, Robj); 4730 4731 // Generate subtype check. Blows both tmps and Rtemp. 4732 __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2); 4733 4734 // Come here on success 4735 __ mov(R0_tos, 1); 4736 __ b(done); 4737 4738 __ bind(not_subtype); 4739 // Come here on failure 4740 __ profile_typecheck_failed(R1_tmp); 4741 __ mov(R0_tos, 0); 4742 4743 // Collect counts on whether this test sees NULLs a lot or not. 4744 if (ProfileInterpreter) { 4745 __ b(done); 4746 __ bind(is_null); 4747 __ profile_null_seen(R1_tmp); 4748 } else { 4749 __ bind(is_null); // same as 'done' 4750 } 4751 __ bind(done); 4752 } 4753 4754 4755 //---------------------------------------------------------------------------------------------------- 4756 // Breakpoints 4757 void TemplateTable::_breakpoint() { 4758 4759 // Note: We get here even if we are single stepping.. 4760 // jbug inists on setting breakpoints at every bytecode 4761 // even if we are in single step mode. 4762 4763 transition(vtos, vtos); 4764 4765 // get the unpatched byte code 4766 __ mov(R1, Rmethod); 4767 __ mov(R2, Rbcp); 4768 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2); 4769 #ifdef AARCH64 4770 __ sxtw(Rtmp_save0, R0); 4771 #else 4772 __ mov(Rtmp_save0, R0); 4773 #endif // AARCH64 4774 4775 // post the breakpoint event 4776 __ mov(R1, Rmethod); 4777 __ mov(R2, Rbcp); 4778 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2); 4779 4780 // complete the execution of original bytecode 4781 __ mov(R3_bytecode, Rtmp_save0); 4782 __ dispatch_only_normal(vtos); 4783 } 4784 4785 4786 //---------------------------------------------------------------------------------------------------- 4787 // Exceptions 4788 4789 void TemplateTable::athrow() { 4790 transition(atos, vtos); 4791 __ mov(Rexception_obj, R0_tos); 4792 __ null_check(Rexception_obj, Rtemp); 4793 __ b(Interpreter::throw_exception_entry()); 4794 } 4795 4796 4797 //---------------------------------------------------------------------------------------------------- 4798 // Synchronization 4799 // 4800 // Note: monitorenter & exit are symmetric routines; which is reflected 4801 // in the assembly code structure as well 4802 // 4803 // Stack layout: 4804 // 4805 // [expressions ] <--- Rstack_top = expression stack top 4806 // .. 4807 // [expressions ] 4808 // [monitor entry] <--- monitor block top = expression stack bot 4809 // .. 4810 // [monitor entry] 4811 // [frame data ] <--- monitor block bot 4812 // ... 4813 // [saved FP ] <--- FP 4814 4815 4816 void TemplateTable::monitorenter() { 4817 transition(atos, vtos); 4818 4819 const Register Robj = R0_tos; 4820 const Register Rentry = R1_tmp; 4821 4822 // check for NULL object 4823 __ null_check(Robj, Rtemp); 4824 4825 const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize); 4826 assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment"); 4827 Label allocate_monitor, allocated; 4828 4829 // initialize entry pointer 4830 __ mov(Rentry, 0); // points to free slot or NULL 4831 4832 // find a free slot in the monitor block (result in Rentry) 4833 { Label loop, exit; 4834 const Register Rcur = R2_tmp; 4835 const Register Rcur_obj = Rtemp; 4836 const Register Rbottom = R3_tmp; 4837 assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj); 4838 4839 __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 4840 // points to current entry, starting with top-most entry 4841 __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize); 4842 // points to word before bottom of monitor block 4843 4844 __ cmp(Rcur, Rbottom); // check if there are no monitors 4845 #ifndef AARCH64 4846 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); 4847 // prefetch monitor's object for the first iteration 4848 #endif // !AARCH64 4849 __ b(allocate_monitor, eq); // there are no monitors, skip searching 4850 4851 __ bind(loop); 4852 #ifdef AARCH64 4853 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes())); 4854 #endif // AARCH64 4855 __ cmp(Rcur_obj, 0); // check if current entry is used 4856 __ mov(Rentry, Rcur, eq); // if not used then remember entry 4857 4858 __ cmp(Rcur_obj, Robj); // check if current entry is for same object 4859 __ b(exit, eq); // if same object then stop searching 4860 4861 __ add(Rcur, Rcur, entry_size); // otherwise advance to next entry 4862 4863 __ cmp(Rcur, Rbottom); // check if bottom reached 4864 #ifndef AARCH64 4865 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); 4866 // prefetch monitor's object for the next iteration 4867 #endif // !AARCH64 4868 __ b(loop, ne); // if not at bottom then check this entry 4869 __ bind(exit); 4870 } 4871 4872 __ cbnz(Rentry, allocated); // check if a slot has been found; if found, continue with that one 4873 4874 __ bind(allocate_monitor); 4875 4876 // allocate one if there's no free slot 4877 { Label loop; 4878 assert_different_registers(Robj, Rentry, R2_tmp, Rtemp); 4879 4880 // 1. compute new pointers 4881 4882 #ifdef AARCH64 4883 __ check_extended_sp(Rtemp); 4884 __ sub(SP, SP, entry_size); // adjust extended SP 4885 __ mov(Rtemp, SP); 4886 __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize)); 4887 #endif // AARCH64 4888 4889 __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 4890 // old monitor block top / expression stack bottom 4891 4892 __ sub(Rstack_top, Rstack_top, entry_size); // move expression stack top 4893 __ check_stack_top_on_expansion(); 4894 4895 __ sub(Rentry, Rentry, entry_size); // move expression stack bottom 4896 4897 __ mov(R2_tmp, Rstack_top); // set start value for copy loop 4898 4899 __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 4900 // set new monitor block top 4901 4902 // 2. move expression stack contents 4903 4904 __ cmp(R2_tmp, Rentry); // check if expression stack is empty 4905 #ifndef AARCH64 4906 __ ldr(Rtemp, Address(R2_tmp, entry_size), ne); // load expression stack word from old location 4907 #endif // !AARCH64 4908 __ b(allocated, eq); 4909 4910 __ bind(loop); 4911 #ifdef AARCH64 4912 __ ldr(Rtemp, Address(R2_tmp, entry_size)); // load expression stack word from old location 4913 #endif // AARCH64 4914 __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location 4915 // and advance to next word 4916 __ cmp(R2_tmp, Rentry); // check if bottom reached 4917 #ifndef AARCH64 4918 __ ldr(Rtemp, Address(R2, entry_size), ne); // load expression stack word from old location 4919 #endif // !AARCH64 4920 __ b(loop, ne); // if not at bottom then copy next word 4921 } 4922 4923 // call run-time routine 4924 4925 // Rentry: points to monitor entry 4926 __ bind(allocated); 4927 4928 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly. 4929 // The object has already been poped from the stack, so the expression stack looks correct. 4930 __ add(Rbcp, Rbcp, 1); 4931 4932 __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes())); // store object 4933 __ lock_object(Rentry); 4934 4935 // check to make sure this monitor doesn't cause stack overflow after locking 4936 __ save_bcp(); // in case of exception 4937 __ arm_stack_overflow_check(0, Rtemp); 4938 4939 // The bcp has already been incremented. Just need to dispatch to next instruction. 4940 __ dispatch_next(vtos); 4941 } 4942 4943 4944 void TemplateTable::monitorexit() { 4945 transition(atos, vtos); 4946 4947 const Register Robj = R0_tos; 4948 const Register Rcur = R1_tmp; 4949 const Register Rbottom = R2_tmp; 4950 const Register Rcur_obj = Rtemp; 4951 4952 // check for NULL object 4953 __ null_check(Robj, Rtemp); 4954 4955 const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize); 4956 Label found, throw_exception; 4957 4958 // find matching slot 4959 { Label loop; 4960 assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj); 4961 4962 __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 4963 // points to current entry, starting with top-most entry 4964 __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize); 4965 // points to word before bottom of monitor block 4966 4967 __ cmp(Rcur, Rbottom); // check if bottom reached 4968 #ifndef AARCH64 4969 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); 4970 // prefetch monitor's object for the first iteration 4971 #endif // !AARCH64 4972 __ b(throw_exception, eq); // throw exception if there are now monitors 4973 4974 __ bind(loop); 4975 #ifdef AARCH64 4976 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes())); 4977 #endif // AARCH64 4978 // check if current entry is for same object 4979 __ cmp(Rcur_obj, Robj); 4980 __ b(found, eq); // if same object then stop searching 4981 __ add(Rcur, Rcur, entry_size); // otherwise advance to next entry 4982 __ cmp(Rcur, Rbottom); // check if bottom reached 4983 #ifndef AARCH64 4984 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); 4985 #endif // !AARCH64 4986 __ b (loop, ne); // if not at bottom then check this entry 4987 } 4988 4989 // error handling. Unlocking was not block-structured 4990 __ bind(throw_exception); 4991 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); 4992 __ should_not_reach_here(); 4993 4994 // call run-time routine 4995 // Rcur: points to monitor entry 4996 __ bind(found); 4997 __ push_ptr(Robj); // make sure object is on stack (contract with oopMaps) 4998 __ unlock_object(Rcur); 4999 __ pop_ptr(Robj); // discard object 5000 } 5001 5002 5003 //---------------------------------------------------------------------------------------------------- 5004 // Wide instructions 5005 5006 void TemplateTable::wide() { 5007 transition(vtos, vtos); 5008 __ ldrb(R3_bytecode, at_bcp(1)); 5009 5010 InlinedAddress Ltable((address)Interpreter::_wentry_point); 5011 __ ldr_literal(Rtemp, Ltable); 5012 __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp); 5013 5014 __ nop(); // to avoid filling CPU pipeline with invalid instructions 5015 __ nop(); 5016 __ bind_literal(Ltable); 5017 } 5018 5019 5020 //---------------------------------------------------------------------------------------------------- 5021 // Multi arrays 5022 5023 void TemplateTable::multianewarray() { 5024 transition(vtos, atos); 5025 __ ldrb(Rtmp_save0, at_bcp(3)); // get number of dimensions 5026 5027 // last dim is on top of stack; we want address of first one: 5028 // first_addr = last_addr + ndims * stackElementSize - 1*wordsize 5029 // the latter wordSize to point to the beginning of the array. 5030 __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize)); 5031 __ sub(R1, Rtemp, wordSize); 5032 5033 call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1); 5034 __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize)); 5035 // MacroAssembler::StoreStore useless (included in the runtime exit path) 5036 }