/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants can be used at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         RegisterOrConstant offset,
                         Register val,         // Noreg means always null.
                         Register tmp1,
                         Register tmp2,
                         Register tmp3,
                         DecoratorSet decorators) {
  assert_different_registers(tmp1, tmp2, tmp3, val, base);
  BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
  bs->store_at(_masm, decorators, T_OBJECT, base, offset, val, tmp1, tmp2, tmp3, false);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Register base,
                        RegisterOrConstant offset,
                        Register dst,
                        Register tmp1,
                        Register tmp2,
                        DecoratorSet decorators) {
  assert_different_registers(base, tmp1, tmp2);
  assert_different_registers(dst, tmp1, tmp2);
  BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
  bs->load_at(_masm, decorators, T_OBJECT, base, offset, dst, tmp1, tmp2, false);
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

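// Bytecode quickening, illustrated (Java bytecodes, not generated code):
// after the first execution has resolved a field, e.g.
//   getfield #12        // resolved int field
// the bytecode is patched in place to
//   fast_igetfield #12
// so later executions skip constant pool resolution. patch_bytecode below
// performs that in-place rewrite of the bytecode stream.
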
// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

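// Note on the templates below: the template interpreter caches the topmost
// expression stack value in a register ("TOS cache"): R17_tos for integral
// and oop values, F15_ftos for float/double values. transition(in, out)
// documents which cached state a template expects and which one it leaves.
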
void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notFloat, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ bne(CCR0, notFloat);
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);
  __ b(exit);

  __ align(32, 12);
  // assume the tag is for condy; if not, the VM runtime will tell us
  __ bind(notFloat);
  condy_helper(exit);

  __ align(32, 12);
  __ bind(exit);
}

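// Illustration (Java bytecode, not generated code): for
//   ldc #7        // where CP slot 7 carries the JVM_CONSTANT_Float tag
// the template above takes the notClass/notInt path and loads the float
// straight out of the constant pool; class constants instead call into
// InterpreterRuntime::ldc to obtain the java mirror of the class.
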
// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label is_null;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size);  // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null);

  // Convert null sentinel to NULL.
  int simm16_rest = __ load_const_optimized(Rscratch, Universe::the_null_sentinel_addr(), R0, true);
  __ ld(Rscratch, simm16_rest, Rscratch);
  __ cmpld(CCR0, R17_tos, Rscratch);
  if (VM_Version::has_isel()) {
    __ isel_0(R17_tos, CCR0, Assembler::equal);
  } else {
    Label not_sentinel;
    __ bne(CCR0, not_sentinel);
    __ li(R17_tos, 0);
    __ bind(not_sentinel);
  }
  __ verify_oop(R17_tos);
  __ dispatch_epilog(atos, Bytecodes::length_for(bytecode()));

  __ bind(is_null);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
  __ call_VM(R17_tos, entry, R3_ARG1);
  __ verify_oop(R17_tos);
}

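// ldc2_w pushes a category-2 constant (long or double), which occupies two
// expression stack slots; that is why it gets its own template below
// instead of sharing the ldc path above.
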
void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label not_double, not_long, exit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);
  __ sldi(Rindex, Rindex, LogBytesPerWord);

  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, not_double);
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(exit);

  __ bind(not_double);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Long);
  __ bne(CCR0, not_long);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);
  __ b(exit);

  __ bind(not_long);
  condy_helper(exit);

  __ align(32, 12);
  __ bind(exit);
}

void TemplateTable::condy_helper(Label& Done) {
  const Register obj   = R31;
  const Register off   = R11_scratch1;
  const Register flags = R12_scratch2;
  const Register rarg  = R4_ARG2;
  __ li(rarg, (int)bytecode());
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
  __ get_vm_result_2(flags);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ andi(off, flags, ConstantPoolCacheEntry::field_index_mask);

  // What sort of thing are we loading?
  __ rldicl(flags, flags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

  switch (bytecode()) {
    case Bytecodes::_ldc:
    case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmplwi(CCR0, flags, itos);
      __ bne(CCR0, notInt);
      // itos
      __ lwax(R17_tos, obj, off);
      __ push(itos);
      __ b(Done);

      __ bind(notInt);
      __ cmplwi(CCR0, flags, ftos);
      __ bne(CCR0, notFloat);
      // ftos
      __ lfsx(F15_ftos, obj, off);
      __ push(ftos);
      __ b(Done);

      __ bind(notFloat);
      __ cmplwi(CCR0, flags, stos);
      __ bne(CCR0, notShort);
      // stos
      __ lhax(R17_tos, obj, off);
      __ push(stos);
      __ b(Done);

      __ bind(notShort);
      __ cmplwi(CCR0, flags, btos);
      __ bne(CCR0, notByte);
      // btos
      __ lbzx(R17_tos, obj, off);
      __ extsb(R17_tos, R17_tos);
      __ push(btos);
      __ b(Done);

      __ bind(notByte);
      __ cmplwi(CCR0, flags, ctos);
      __ bne(CCR0, notChar);
      // ctos
      __ lhzx(R17_tos, obj, off);
      __ push(ctos);
      __ b(Done);

      __ bind(notChar);
      __ cmplwi(CCR0, flags, ztos);
      __ bne(CCR0, notBool);
      // ztos
      __ lbzx(R17_tos, obj, off);
      __ push(ztos);
      __ b(Done);

      __ bind(notBool);
      break;
    }

    case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmplwi(CCR0, flags, ltos);
      __ bne(CCR0, notLong);
      // ltos
      __ ldx(R17_tos, obj, off);
      __ push(ltos);
      __ b(Done);

      __ bind(notLong);
      __ cmplwi(CCR0, flags, dtos);
      __ bne(CCR0, notDouble);
      // dtos
      __ lfdx(F15_ftos, obj, off);
      __ push(dtos);
      __ b(Done);

      __ bind(notDouble);
      break;
    }

    default:
      ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

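// Note: condy_helper above implements dynamically-computed constants
// (CONSTANT_Dynamic, JEP 309). The VM call resolves the constant; the
// returned flags word tells the template which tos state to push, so one
// helper serves ldc, ldc_w and ldc2_w alike.
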
// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable type long from locals area to TOS cache register.
// Local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

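// Illustration (class file encoding, not generated code): the wide prefix
// widens the local index to 16 bits, e.g.
//   wide iload indexbyte1 indexbyte2
// hence locals_index_wide below reads a 2-byte index at bcp + 2, since bcp
// still points at the wide opcode itself.
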
void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because Lbcp points to wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3,
                 Rtemp2     = R31;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  do_oop_load(_masm, Rload_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos, Rtemp, Rtemp2,
              IN_HEAP | IN_HEAP_ARRAY);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

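// Note: with UseCompressedOops each element of an object array is a 4-byte
// narrow oop, so aaload scales the index by log2(4) = 2 instead of
// LogBytesPerWord; do_oop_load lets the GC's barrier assembler decode the
// narrow oop and apply any read barrier the collector requires.
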
void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

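// Illustration (bytecode pair fusion, not generated code): a hot pattern
// such as
//   aload_0
//   fast_igetfield #off
// is fused by the rewrite below into the single bytecode _fast_iaccess_0,
// saving one dispatch per field access on 'this'.
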
void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs && rc == may_rewrite) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

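// Note: astore above (and wide_astore below) must also accept a
// returnAddress value produced by jsr, which is why they use
// verify_oop_or_return_address instead of a plain verify_oop.
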
void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

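// aastore overview: the template below keeps all three operands on the
// expression stack while it performs the checks the JVM spec requires:
// a null value is stored without a type check (but profiled); otherwise
// the value's klass must be a subtype of the array's element klass, and a
// failing check dispatches to the ArrayStoreException entry.
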
// Pop 3 values from the stack and...
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31;    // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, IN_HEAP | IN_HEAP_ARRAY);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, IN_HEAP | IN_HEAP_ARRAY | OOP_NOT_NULL);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  __ pop_ptr(Rarray);
  // tos: val

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(Rscratch, Rarray);
  __ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch);
  int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit());
  __ testbitdi(CCR0, R0, Rscratch, diffbit);
  Label L_skip;
  __ bfalse(CCR0, L_skip);
  __ andi(R17_tos, R17_tos, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ index_check_without_pop(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

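// Note on the dup/swap templates below: on ppc64 every expression stack
// slot is 64 bits wide (Interpreter::stackElementSize), so slots can be
// shuffled with plain ld/std regardless of the value's Java type; no type
// information is needed to implement the stack-manipulation bytecodes.
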
void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);      // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);  // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  // stack: ..., b, a
}

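// Note on the shift cases in iop2 below: the JVM spec masks int shift
// distances to 5 bits (value & 0x1f), which the template implements by
// clearing the upper bits with rldicl(R17_tos, R17_tos, 0, 64-5) before
// slw/sraw/srw.
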
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // tos  = number of bits to shift
  // Rscratch = value to shift
  switch (op) {
    case  add:   __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:   __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:   __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and:  __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:   __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor:  __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:   __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:   __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:   __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:   __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and:  __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:   __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor:  __ xorr(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

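// Note on idiv/ldiv: PowerPC's divw/divd leave the result undefined for
// division by zero and for min_int/-1, so the templates special-case any
// divisor in {-1, 0, 1} up front (the +/-1 cases are handled with a
// multiply). irem/lrem then reuse the quotient via
//   remainder = dividend - (dividend / divisor) * divisor.
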
void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

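// Illustration (class file encoding, not generated code): iinc carries its
// operands in the bytecode stream,
//   iinc index const
// where const is a signed byte; hence the extsb after loading the
// increment below.
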
// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);              // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp);    // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

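// Note on convert() below: the _i2d case deliberately has no break; after
// sign-extending the int it falls through into the _l2d sequence
// (move_l_to_d + fcfid), since both convert a 64-bit integer to double.
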
void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
    case Bytecodes::_l2d:
      __ move_l_to_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ move_l_to_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ move_l_to_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ move_d_to_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ move_d_to_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result == 1  => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

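// branch() below handles both jsr and normal (conditional/goto) branches.
// For backward branches it also bumps the backedge counter and, once that
// counter overflows with UseOnStackReplacement enabled, triggers on-stack
// replacement via InterpreterRuntime::frequency_counter_overflow.
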
void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in Otos_i.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos, 0, true);
    return;
  }

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  // Bump bytecode pointer by displacement (take the branch).
  __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    Label Lforward;

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        if (UseOnStackReplacement) {
          __ and_(Rscratch3, Rscratch2, Rscratch3);
          __ bne(CCR0, Lforward);
          __ b(Loverflow);
        } else {
          __ b(Lforward);
        }
      }

      // If there's no MDO, increment counter in method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters);
      if (UseOnStackReplacement) {
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
      } else {
        __ b(Lforward);
      }
      __ bind(Loverflow);

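      // At this point the backedge counter has just overflowed: the call
      // below may hand back an OSR nmethod for this bci, which is only
      // installed if one was actually generated and is still in_use.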
      // Notify point for loop, pass branch bytecode.
      __ subf(R4_ARG2, Rdisp, R14_bcp); // Compute branch bytecode (previous bcp).
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

      // Was an OSR adapter generated?
      __ cmpdi(CCR0, R3_RET, 0);
      __ beq(CCR0, Lforward);

      // Has the nmethod been invalidated already?
      __ lbz(R0, nmethod::state_offset(), R3_RET);
      __ cmpwi(CCR0, R0, nmethod::in_use);
      __ bne(CCR0, Lforward);

      // Migrate the interpreter frame off of the stack.
      // We can use all registers because we will not return to interpreter from this point.

      // Save nmethod.
      const Register osr_nmethod = R31;
      __ mr(osr_nmethod, R3_RET);
      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
      __ reset_last_Java_frame();
      // OSR buffer is in ARG1.

      // Remove the interpreter frame.
      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

      // Jump to the osr code.
      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
      __ mtlr(R0);
      __ mtctr(R11_scratch1);
      __ bctr();

    } else {

      const Register invoke_ctr = Rscratch1;
      // Update Backedge branch separately from invocations.
      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);

      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(invoke_ctr, R4_counters, Rscratch2, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(bumped_count, R4_counters, R14_bcp, Rdisp, Rscratch2);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(invoke_ctr, R4_counters, R14_bcp, Rdisp, Rscratch2);
        }
      }
    }

    __ bind(Lforward);
  }
  __ dispatch_next(vtos, 0, true);
}

// Helper function for if_cmp* methods below.
// Factored out common compare and branch code.
void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
  Label Lnot_taken;
  // Note: The condition code we get is the condition under which we
  // *fall through*! So we have to invert the CC here.

  if (is_jint) {
    if (cmp0) {
      __ cmpwi(CCR0, Rfirst, 0);
    } else {
      __ cmpw(CCR0, Rfirst, Rsecond);
    }
  } else {
    if (cmp0) {
      __ cmpdi(CCR0, Rfirst, 0);
    } else {
      __ cmpd(CCR0, Rfirst, Rsecond);
    }
  }
  branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);

  // Condition is false => Jump!
  branch(false, false);

  // Condition is not true => Continue.
  __ align(32, 12);
  __ bind(Lnot_taken);
  __ profile_not_taken_branch(Rscratch1, Rscratch2);
}

// Compare integer values with zero and fall through if CC holds, branch away otherwise.
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
}

//
// Interface:
//  - Rfirst: First operand  (older stack value)
//  - tos:    Second operand (younger stack value)
void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_i(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_ptr(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
}

void TemplateTable::ret() {
  locals_index(R11_scratch1);
  __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);

  __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);

  __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
  __ add(R11_scratch1, R17_tos, R11_scratch1);
  __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, true);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);

  const Register Rindex    = R3_ARG1,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, R17_tos, Rindex);
  __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2);
  // Tos now contains the bci, compute the bcp from that.
  __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
  __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
  __ add(R14_bcp, Rscratch1, Rscratch2);
  __ dispatch_next(vtos, 0, true);
}

void TemplateTable::tableswitch() {
  transition(itos, vtos);

  Label Ldispatch, Ldefault_case;
  Register Rlow_byte        = R3_ARG1,
           Rindex           = Rlow_byte,
           Rhigh_byte       = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset
           Rscratch1        = R11_scratch1,
           Rscratch2        = R12_scratch2,
           Roffset          = R6_ARG4;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Load lo & hi.
  __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);

  // Check for default case (=index outside [low,high]).
  __ cmpw(CCR0, R17_tos, Rlow_byte);
  __ cmpw(CCR1, R17_tos, Rhigh_byte);
  __ blt(CCR0, Ldefault_case);
  __ bgt(CCR1, Ldefault_case);

  // Lookup dispatch offset.
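  // Informal sketch of what follows: with Rdef_offset_addr pointing at the
  // (aligned) default-offset word, the branch offset is read from
  //   Rdef_offset_addr + 3 * BytesPerInt + (R17_tos - low) * BytesPerInt
  // (skipping the default, low and high words), and then added to R14_bcp.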
  __ sub(Rindex, R17_tos, Rlow_byte);
  __ extsw(Rindex, Rindex);
  __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
  __ sldi(Rindex, Rindex, LogBytesPerInt);
  __ addi(Rindex, Rindex, 3 * BytesPerInt);
#if defined(VM_LITTLE_ENDIAN)
  __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
  __ extsw(Roffset, Roffset);
#else
  __ lwax(Roffset, Rdef_offset_addr, Rindex);
#endif
  __ b(Ldispatch);

  __ bind(Ldefault_case);
  __ profile_switch_default(Rhigh_byte, Rscratch1);
  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);

  __ bind(Ldispatch);

  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos, 0, true);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

// Table switch using linear search through cases.
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is big-endian format here.
void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);

  Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
  Register Rcount           = R3_ARG1,
           Rcurrent_pair    = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
           Roffset          = R31,     // Might need to survive C call.
           Rvalue           = R12_scratch2,
           Rscratch         = R11_scratch1,
           Rcmp_value       = R17_tos;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Setup loop counter and limit.
  __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.

  __ mtctr(Rcount);
  __ cmpwi(CCR0, Rcount, 0);
  __ bne(CCR0, Lloop_entry);

  // Default case
  __ bind(Ldefault_case);
  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
  if (ProfileInterpreter) {
    __ profile_switch_default(Rdef_offset_addr, Rcount /* scratch */);
  }
  __ b(Lcontinue_execution);

  // Next iteration
  __ bind(Lsearch_loop);
  __ bdz(Ldefault_case);
  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
  __ bind(Lloop_entry);
  __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
  __ cmpw(CCR0, Rvalue, Rcmp_value);
  __ bne(CCR0, Lsearch_loop);

  // Found, load offset.
  __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
  // Calculate case index and profile.
  __ mfctr(Rcurrent_pair);
  if (ProfileInterpreter) {
    __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
    __ profile_switch_case(Rcurrent_pair, Rcount /* scratch */, Rdef_offset_addr /* scratch */, Rscratch);
  }

  __ bind(Lcontinue_execution);
  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos, 0, true);
}

// Table switch using binary search (value/offset pairs are ordered).
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is big-endian format here.
// So on little-endian machines we have to byte-reverse the offset, the count, and the compare value.
void TemplateTable::fast_binaryswitch() {

  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Register allocation
  const Register Rkey     = R17_tos; // already set (tosca)
  const Register Rarray   = R3_ARG1;
  const Register Ri       = R4_ARG2;
  const Register Rj       = R5_ARG3;
  const Register Rh       = R6_ARG4;
  const Register Rscratch = R11_scratch1;

  const int log_entry_size = 3;
  const int entry_size     = 1 << log_entry_size;

  Label found;

  // Find array start.
  __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
  __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));

  // Initialize i & j.
  __ li(Ri, 0);
  __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);

  // And start.
  Label entry;
  __ b(entry);

  // Binary search loop.
  { Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ srdi(Rh, Rh, 1);
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sldi(Rscratch, Rh, log_entry_size);
#if defined(VM_LITTLE_ENDIAN)
    __ lwbrx(Rscratch, Rscratch, Rarray);
#else
    __ lwzx(Rscratch, Rscratch, Rarray);
#endif

    // if (key < current value)
    //   Rh = Rj
    // else
    //   Rh = Ri
    Label Lgreater;
    __ cmpw(CCR0, Rkey, Rscratch);
    __ bge(CCR0, Lgreater);
    __ mr(Rj, Rh);
    __ b(entry);
    __ bind(Lgreater);
    __ mr(Ri, Rh);

    // while (i+1 < j)
    __ bind(entry);
    __ addi(Rscratch, Ri, 1);
    __ cmpw(CCR0, Rscratch, Rj);
    __ add(Rh, Ri, Rj); // start h = i + j >> 1;

    __ blt(CCR0, loop);
  }

  // End of binary search, result index is i (must check again!).
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mr(Rh, Ri); // Save index in i for profiling.
  }
  // Ri = address of the matched value/offset pair.
  __ sldi(Ri, Ri, log_entry_size);
  __ add(Ri, Ri, Rarray);
  __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);

  // Check whether the value at Ri matches the key.
  __ cmpw(CCR0, Rkey, Rscratch);
  __ beq(CCR0, found);
  // Entry not found -> j = default offset.
  __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ b(default_case);

  __ bind(found);
  // Entry found -> j = offset.
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);

  if (ProfileInterpreter) {
    __ b(continue_execution);
  }

  __ bind(default_case); // fall through (if not profiling)
  __ profile_switch_default(Ri, Rscratch);

  __ bind(continue_execution);

  __ extsw(Rj, Rj);
  __ add(R14_bcp, Rj, R14_bcp);
  __ dispatch_next(vtos, 0, true);
}

void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {

    Register Rscratch     = R11_scratch1,
             Rklass       = R12_scratch2,
             Rklass_flags = Rklass;
    Label Lskip_register_finalizer;

    // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
    assert(state == vtos, "only valid state");
    __ ld(R17_tos, 0, R18_locals);

    // Load klass of this obj.
    __ load_klass(Rklass, R17_tos);
    __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
    __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
    __ bfalse(CCR0, Lskip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);

    __ align(32, 12);
    __ bind(Lskip_register_finalizer);
  }

  if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
    Label no_safepoint;
    __ ld(R11_scratch1, in_bytes(Thread::polling_page_offset()), R16_thread);
    __ andi_(R11_scratch1, R11_scratch1, SafepointMechanism::poll_bit());
    __ beq(CCR0, no_safepoint);
    __ push(state);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
    __ pop(state);
    __ bind(no_safepoint);
  }

  // Move the result value into the correct register and remove the memory stack frame.
  __ remove_activation(state, /* throw_monitor_exception */ true);
  // Restoration of lr done by remove_activation.
  switch (state) {
    // Narrow result if state is itos but result type is smaller.
    // Need to narrow in the return bytecode rather than in generate_return_entry
    // since compiled code callers expect the result to already be narrowed.
    case itos: __ narrow(R17_tos); /* fall through */
    case ltos:
    case atos: __ mr(R3_RET, R17_tos); break;
    case ftos:
    case dtos: __ fmr(F1_RET, F15_ftos); break;
    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
               // to get visible before the reference to the object gets stored anywhere.
               __ membar(Assembler::StoreStore); break;
    default  : ShouldNotReachHere();
  }
  __ blr();
}

// ============================================================================
// Constant pool cache access
//
// Memory ordering:
//
// As done in the C++ interpreter, we load the fields
// - _indices
// - _f12_oop
// with acquire semantics, because they are inspected to see whether the cache
// entry is already resolved. We don't want later loads to float above this check.
// See also the comments in ConstantPoolCacheEntry::bytecode_1(),
// ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1().

// Call into the VM if the call site is not yet resolved.
//
// Input regs:
//   - None, all passed regs are outputs.
//
// Returns:
//   - Rcache: The constant pool cache entry that contains the resolved result.
//
// Kills:
//   - Rscratch
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {

  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  Label Lresolved, Ldone;

  Bytecodes::Code code = bytecode();
  switch (code) {
    case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
    case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
    default: break;
  }

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  // We are resolved if the indices offset contains the current bytecode.
#if defined(VM_LITTLE_ENDIAN)
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
#else
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
#endif
  // Acquire by cmp-br-isync (see below).
  __ cmpdi(CCR0, Rscratch, (int)code);
  __ beq(CCR0, Lresolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ li(R4_ARG2, code);
  __ call_VM(noreg, entry, R4_ARG2, true);

  // Update registers with resolved info.
  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  __ b(Ldone);

  __ bind(Lresolved);
  __ isync(); // Order load wrt. succeeding loads.
  __ bind(Ldone);
}

// Load the constant pool cache entry at field accesses into registers.
// The Rcache and Rindex registers must be set before call.
// Input:
//   - Rcache, Rindex
// Output:
//   - Robj, Roffset, Rflags
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register Rindex /* unused on PPC64 */,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static = false) {
  assert_different_registers(Rcache, Rflags, Roffset);
  // assert(Rindex == noreg, "parameter not used on PPC64");

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
  if (is_static) {
    __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
    __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
    __ resolve_oop_handle(Robj);
    // Acquire not needed here.
    // The following access has an address dependency on this value.
  }
}

// Load the constant pool cache entry at invokes into registers.
// Resolve if necessary.
//
// Input Registers:
//   - None, bcp is used, though.
//
// Return registers:
//   - Rmethod       (f1 field or f2 if invokevirtual)
//   - Ritable_index (f2 field)
//   - Rflags        (flags field)
//
// Kills:
//   - R21
//
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register Rmethod,
                                               Register Ritable_index,
                                               Register Rflags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  // Determine constant pool cache field offsets.
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
  // Access constant pool cache fields.
  const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());

  Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.

  if (is_invokevfinal) {
    assert(Ritable_index == noreg, "register not used");
    // Already resolved.
    __ get_cache_and_index_at_bcp(Rcache, 1);
  } else {
    resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
  }

  __ ld(Rmethod, method_offset, Rcache);
  __ ld(Rflags, flags_offset, Rcache);

  if (Ritable_index != noreg) {
    __ ld(Ritable_index, index_offset, Rcache);
  }
}

// ============================================================================
// Field access

// Volatile variables demand their effects be made known to all CPUs
// in order. Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other. ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.
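//
// As a rough sketch of the idioms used throughout this file (informal, see
// the individual templates for the authoritative code):
//   volatile load:  <load>; twi; isync   - acquire via a never-taken trap
//                                          plus isync on the loaded value
//   volatile store: lwsync; <store>      - release() before the store
// and a full sync after a volatile store covers the remaining
// volatile-store-volatile-load case.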

// The registers cache and index are expected to be set before call.
// Correct values of the cache and index registers are preserved.
// Kills:
//   Rcache (if has_tos)
//   Rscratch
void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {

  assert_different_registers(Rcache, Rscratch);

  if (JvmtiExport::can_post_field_access()) {
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    Label Lno_field_access_post;

    // Check if post field access is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_access_post);

    // Post access enabled - do it!
    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      __ li(R17_tos, 0);
    } else {
      if (has_tos) {
        // The fast bytecode versions have the obj ptr in a register.
        // Thus, save the object pointer before call_VM() clobbers it:
        // put the object on tos where GC wants it.
        __ push_ptr(R17_tos);
      } else {
        // Load top of stack (do not pop the value off the stack).
        __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
      }
      __ verify_oop(R17_tos);
    }
    // tos:   object pointer or NULL if static
    // cache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
    if (!is_static && has_tos) {
      // Restore object pointer.
      __ pop_ptr(R17_tos);
      __ verify_oop(R17_tos);
    } else {
      // Cache is still needed to get class or obj.
      __ get_cache_and_index_at_bcp(Rcache, 1);
    }

    __ align(32, 12);
    __ bind(Lno_field_access_post);
  }
}

// Kills R11_scratch1.
void TemplateTable::pop_and_check_object(Register Roop) {
  Register Rtmp = R11_scratch1;

  assert_different_registers(Rtmp, Roop);
  __ pop_ptr(Roop);
  // For field access must check obj.
  __ null_check_throw(Roop, -1, Rtmp);
  __ verify_oop(Roop);
}

// PPC64: implement volatile loads as fence-load-acquire.
void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  Label Lacquire, Lisync;

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R22_tmp2,
                 Roffset       = R23_tmp3,
                 Rflags        = R31,
                 Rbtable       = R5_ARG3,
                 Rbc           = R6_ARG4,
                 Rscratch      = R12_scratch2;

  static address field_branch_table[number_of_states],
                 static_branch_table[number_of_states];

  address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table;

  // Get field offset.
  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, is_static, false);

  // Load after possible GC.
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  // Note: sync is needed before volatile load on PPC64.

  // Check field type.
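  // The rldicl below is, informally,
  //   Rflags = (Rflags >> tos_state_shift) & ((1 << tos_state_bits) - 1);
  // i.e. it extracts the TosState that indexes branch_table.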
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  __ cmpwi(CCR6, Rscratch, 1); // Volatile?
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  }
  __ ldx(Rbtable, Rbtable, Rflags);

  // Get the obj from stack.
  if (!is_static) {
    pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
  } else {
    __ verify_oop(Rclass_or_obj);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  }
  __ mtctr(Rbtable);
  __ bctr();

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x654);
#endif

  if (!is_static && rc == may_not_rewrite) {
    // We reuse the code from is_static. It's jumped to via the table above.
    return;
  }

#ifdef ASSERT
  // __ bind(Lvtos);
  address pc_before_fence = __ pc();
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  assert(branch_table[vtos] == 0, "can't compute twice");
  branch_table[vtos] = __ pc(); // non-volatile_entry point
  __ stop("vtos unexpected", 0x655);
#endif

  __ align(32, 28, 28); // Align load.
  // __ bind(Ldtos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[dtos] == 0, "can't compute twice");
  branch_table[dtos] = __ pc(); // non-volatile_entry point
  __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
  __ push(dtos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
  }
  {
    Label acquire_double;
    __ beq(CCR6, acquire_double); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ bind(acquire_double);
    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NaN.
  }

  __ align(32, 28, 28); // Align load.
  // __ bind(Lftos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ftos] == 0, "can't compute twice");
  branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
  __ push(ftos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch);
  }
  {
    Label acquire_float;
    __ beq(CCR6, acquire_float); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ bind(acquire_float);
    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NaN.
  }

  __ align(32, 28, 28); // Align load.
  // __ bind(Litos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[itos] == 0, "can't compute twice");
  branch_table[itos] = __ pc(); // non-volatile_entry point
  __ lwax(R17_tos, Rclass_or_obj, Roffset);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lltos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ltos] == 0, "can't compute twice");
  branch_table[ltos] = __ pc(); // non-volatile_entry point
  __ ldx(R17_tos, Rclass_or_obj, Roffset);
  __ push(ltos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lbtos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[btos] == 0, "can't compute twice");
  branch_table[btos] = __ pc(); // non-volatile_entry point
  __ lbzx(R17_tos, Rclass_or_obj, Roffset);
  __ extsb(R17_tos, R17_tos);
  __ push(btos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lztos); (same code as btos)
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ztos] == 0, "can't compute twice");
  branch_table[ztos] = __ pc(); // non-volatile_entry point
  __ lbzx(R17_tos, Rclass_or_obj, Roffset);
  __ push(ztos);
  if (!is_static && rc == may_rewrite) {
    // Use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lctos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ctos] == 0, "can't compute twice");
  branch_table[ctos] = __ pc(); // non-volatile_entry point
  __ lhzx(R17_tos, Rclass_or_obj, Roffset);
  __ push(ctos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lstos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[stos] == 0, "can't compute twice");
  branch_table[stos] = __ pc(); // non-volatile_entry point
  __ lhax(R17_tos, Rclass_or_obj, Roffset);
  __ push(stos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Latos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
  __ verify_oop(R17_tos);
  __ push(atos);
  //__ dcbt(R17_tos); // prefetch
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 12);
  __ bind(Lacquire);
  __ twi_0(R17_tos);
  __ bind(Lisync);
  __ isync(); // acquire

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "get initialization");
    //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

// The registers cache and index are expected to be set before call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {

  assert_different_registers(Rcache, Rscratch, R6_ARG4);

  if (JvmtiExport::can_post_field_modification()) {
    Label Lno_field_mod_post;

    // Check if post field modification is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_mod_post);

    // Do the post.
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    const Register Robj = Rscratch;

    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ li(Robj, 0);
    } else {
      // In case of the fast versions, value lives in registers => put it back on tos.
      int offs = Interpreter::expr_offset_in_bytes(0);
      Register base = R15_esp;
      switch (bytecode()) {
        case Bytecodes::_fast_aputfield: __ push_ptr(); offs += Interpreter::stackElementSize; break;
        case Bytecodes::_fast_iputfield: // Fall through
        case Bytecodes::_fast_bputfield: // Fall through
        case Bytecodes::_fast_zputfield: // Fall through
        case Bytecodes::_fast_cputfield: // Fall through
        case Bytecodes::_fast_sputfield: __ push_i(); offs += Interpreter::stackElementSize; break;
        case Bytecodes::_fast_lputfield: __ push_l(); offs += 2*Interpreter::stackElementSize; break;
        case Bytecodes::_fast_fputfield: __ push_f(); offs += Interpreter::stackElementSize; break;
        case Bytecodes::_fast_dputfield: __ push_d(); offs += 2*Interpreter::stackElementSize; break;
        default: {
          offs = 0;
          base = Robj;
          const Register Rflags = Robj;
          Label is_one_slot;
          // Life is harder. The stack holds the value on top, followed by the
          // object.
          // We don't know the size of the value, though; it could be
          // one or two words depending on its type. As a result, we must find
          // the type to determine where the object is.
          __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian
          __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

          __ cmpwi(CCR0, Rflags, ltos);
          __ cmpwi(CCR1, Rflags, dtos);
          __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
          __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal);
          __ beq(CCR0, is_one_slot);
          __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
          __ bind(is_one_slot);
          break;
        }
      }
      __ ld(Robj, offs, base);
      __ verify_oop(Robj);
    }

    __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0));
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4);
    __ get_cache_and_index_at_bcp(Rcache, 1);

    // In case of the fast versions, value lives in registers => put it back on tos.
    switch (bytecode()) {
      case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
      case Bytecodes::_fast_iputfield: // Fall through
      case Bytecodes::_fast_bputfield: // Fall through
      case Bytecodes::_fast_zputfield: // Fall through
      case Bytecodes::_fast_cputfield: // Fall through
      case Bytecodes::_fast_sputfield: __ pop_i(); break;
      case Bytecodes::_fast_lputfield: __ pop_l(); break;
      case Bytecodes::_fast_fputfield: __ pop_f(); break;
      case Bytecodes::_fast_dputfield: __ pop_d(); break;
      default: break; // Nothin' to do.
    }

    __ align(32, 12);
    __ bind(Lno_field_mod_post);
  }
}

// PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  Label Lvolatile;

  const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
                 Rclass_or_obj = R31,      // Needs to survive C call.
                 Roffset       = R22_tmp2, // Needs to survive C call.
                 Rflags        = R3_ARG1,
                 Rbtable       = R4_ARG2,
                 Rscratch      = R11_scratch1,
                 Rscratch2     = R12_scratch2,
                 Rscratch3     = R6_ARG4,
                 Rbc           = Rscratch3;
  const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).

  static address field_rw_branch_table[number_of_states],
                 field_norw_branch_table[number_of_states],
                 static_branch_table[number_of_states];

  address* branch_table = is_static ? static_branch_table :
    (rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table);

  // Stack (grows up):
  //   value
  //   obj

  // Load the field offset.
  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
  jvmti_post_field_mod(Rcache, Rscratch, is_static);
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.

  // Check the field type.
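  // Entry selection sketch (matching the subf below): the dispatch target is
  //   branch_table[tos_state] - (is_volatile ? BytesPerInstWord : 0),
  // i.e. the volatile path enters one instruction earlier, at the release().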
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ cmpwi(CR_is_vol, Rscratch, 1); // Volatile?
  }
  __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  __ ldx(Rbtable, Rbtable, Rflags);

  __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  __ mtctr(Rbtable);
  __ bctr();

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x656);

  // __ bind(Lvtos);
  address pc_before_release = __ pc();
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  assert(branch_table[vtos] == 0, "can't compute twice");
  branch_table[vtos] = __ pc(); // non-volatile_entry point
  __ stop("vtos unexpected", 0x657);
#endif

  __ align(32, 28, 28); // Align pop.
  // __ bind(Ldtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[dtos] == 0, "can't compute twice");
  branch_table[dtos] = __ pc(); // non-volatile_entry point
  __ pop(dtos);
  if (!is_static) {
    pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
  }
  __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lftos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ftos] == 0, "can't compute twice");
  branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ pop(ftos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Litos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[itos] == 0, "can't compute twice");
  branch_table[itos] = __ pc(); // non-volatile_entry point
  __ pop(itos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stwx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lltos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ltos] == 0, "can't compute twice");
  branch_table[ltos] = __ pc(); // non-volatile_entry point
  __ pop(ltos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stdx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lbtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[btos] == 0, "can't compute twice");
  branch_table[btos] = __ pc(); // non-volatile_entry point
  __ pop(btos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stbx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lztos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ztos] == 0, "can't compute twice");
  branch_table[ztos] = __ pc(); // non-volatile_entry point
  __ pop(ztos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ andi(R17_tos, R17_tos, 0x1);
  __ stbx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lctos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ctos] == 0, "can't compute twice");
  branch_table[ctos] = __ pc(); // non-volatile_entry point
  __ pop(ctos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lstos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[stos] == 0, "can't compute twice");
  branch_table[stos] = __ pc(); // non-volatile_entry point
  __ pop(stos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Latos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  __ pop(atos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, IN_HEAP);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ align(32, 12);
    __ bind(Lvolatile);
    __ fence();
  }
  // fallthru: __ b(Lexit);

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "put initialization");
    //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

// See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
void TemplateTable::jvmti_post_fast_field_mod() {
  __ should_not_reach_here();
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);

  const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
                 Rclass_or_obj = R31,      // Needs to survive C call.
                 Roffset       = R22_tmp2, // Needs to survive C call.
                 Rflags        = R3_ARG1,
                 Rscratch      = R11_scratch1,
                 Rscratch2     = R12_scratch2,
                 Rscratch3     = R4_ARG2;
  const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).

  // Constant pool already resolved => Load flags and offset of field.
  __ get_cache_and_index_at_bcp(Rcache, 1);
  jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // Get the obj and the final store addr.
  pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
  {
    Label LnotVolatile;
    __ beq(CCR0, LnotVolatile);
    __ release();
    __ align(32, 12);
    __ bind(LnotVolatile);
  }

  // Do the store and fencing.
  switch (bytecode()) {
    case Bytecodes::_fast_aputfield:
      // Store into the field.
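      // Note: any release barrier for a volatile field was already emitted
      // above; the store-load fence (if required) follows the store below.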
      do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, IN_HEAP);
      break;

    case Bytecodes::_fast_iputfield:
      __ stwx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_lputfield:
      __ stdx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_zputfield:
      __ andi(R17_tos, R17_tos, 0x1); // boolean is true if LSB is 1
      // fall through to bputfield
    case Bytecodes::_fast_bputfield:
      __ stbx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_sputfield:
      __ sthx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_fputfield:
      __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_dputfield:
      __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
      break;

    default: ShouldNotReachHere();
  }

  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    Label LVolatile;
    __ beq(CR_is_vol, LVolatile);
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ align(32, 12);
    __ bind(LVolatile);
    __ fence();
  }
}

void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);

  Label LisVolatile;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R17_tos,
                 Roffset       = R22_tmp2,
                 Rflags        = R23_tmp3,
                 Rscratch      = R12_scratch2;

  // Constant pool already resolved. Get the field offset.
  __ get_cache_and_index_at_bcp(Rcache, 1);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, false, true);

  // Get the load address.
  __ null_check_throw(Rclass_or_obj, -1, Rscratch);

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
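  // Informally, the record-form rldicl_ computes
  //   Rscratch = (Rflags >> is_volatile_shift) & 1
  // and sets CR0 from the result, so the bne below takes the volatile path.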
  __ bne(CCR0, LisVolatile);

  switch (bytecode()) {
    case Bytecodes::_fast_agetfield:
    {
      do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
      __ verify_oop(R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
      __ verify_oop(R17_tos);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_igetfield:
    {
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_lgetfield:
    {
      __ ldx(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ ldx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_bgetfield:
    {
      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
      __ extsb(R17_tos, R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ extsb(R17_tos, R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_cgetfield:
    {
      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_sgetfield:
    {
      __ lhax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lhax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_fgetfield:
    {
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    case Bytecodes::_fast_dgetfield:
    {
      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
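      // Presumably twi cannot consume an FPR, so the dependency for the
      // acquire is built with fcmpu plus a conditional branch to the next
      // instruction, followed by isync (the cmp-br-isync idiom noted above).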
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  Label LisVolatile;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R17_tos,
                 Roffset       = R22_tmp2,
                 Rflags        = R23_tmp3,
                 Rscratch      = R12_scratch2;

  __ ld(Rclass_or_obj, 0, R18_locals);

  // Constant pool already resolved. Get the field offset.
  __ get_cache_and_index_at_bcp(Rcache, 2);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches.

  // Needed to report exception at the correct bcp.
  __ addi(R14_bcp, R14_bcp, 1);

  // Get the load address.
  __ null_check_throw(Rclass_or_obj, -1, Rscratch);

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  __ bne(CCR0, LisVolatile);

  switch (state) {
    case atos:
    {
      do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
      __ verify_oop(R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
      __ verify_oop(R17_tos);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case itos:
    {
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case ftos:
    {
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    default: ShouldNotReachHere();
  }
  __ addi(R14_bcp, R14_bcp, -1);
}

// ============================================================================
// Calls

// Common code for invoke
//
// Input:
//   - byte_no
//
// Output:
//   - Rmethod:   The method to invoke next.
//   - Rret_addr: The return address to return to.
//   - Rindex:    MethodType (invokehandle) or CallSite obj (invokedynamic)
//   - Rrecv:     Cache for "this" pointer, might be noreg if static call.
//   - Rflags:    Method flags from const pool cache.
//
// Kills:
//   - Rscratch1
//
void TemplateTable::prepare_invoke(int byte_no,
                                   Register Rmethod,  // linked method (or i-klass)
                                   Register Rret_addr,// return address
                                   Register Rindex,   // itable index, MethodType, etc.
                                   Register Rrecv,    // If caller wants to see it.
                                   Register Rflags,   // If caller wants to test it.
                                   Register Rscratch
                                   ) {
  // Determine flags.
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (Rrecv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");

  assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
  assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
  assert_different_registers(Rret_addr, Rscratch);

  load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);

  // Saving of SP done in call_from_interpreter.

  // Maybe push "appendix" to arguments.
  if (is_invokedynamic || is_invokehandle) {
    Label Ldone;
    __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
    __ beq(CCR0, Ldone);
    // Push "appendix" (MethodType, CallSite, etc.).
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ load_resolved_reference_at_index(Rscratch, Rindex);
    __ verify_oop(Rscratch);
    __ push_ptr(Rscratch);
    __ bind(Ldone);
  }

  // Load receiver if needed (after appendix is pushed so parameter size is correct).
  if (load_receiver) {
    const Register Rparam_count = Rscratch;
    __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
    __ load_receiver(Rparam_count, Rrecv);
    __ verify_oop(Rrecv);
  }

  // Get return address.
  {
    Register Rtable_addr = Rscratch;
    Register Rret_type = Rret_addr;
    address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);

    // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
    __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
    __ load_dispatch_table(Rtable_addr, (address*)table_addr);
    __ sldi(Rret_type, Rret_type, LogBytesPerWord);
    // Get return address.
    __ ldx(Rret_addr, Rtable_addr, Rret_type);
  }
}

// Helper for virtual calls. Load target out of vtable and jump off!
// Kills all passed registers.
void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {

  assert_different_registers(Rrecv_klass, Rtemp, Rret);
  const Register Rtarget_method = Rindex;

  // Get target method & entry point.
  const int base = in_bytes(Klass::vtable_start_offset());
  // Calc vtable addr: scale the vtable index by the size of a vtableEntry (8 bytes).
  __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size_in_bytes()));
  // Load target.
  __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
  __ ldx(Rtarget_method, Rindex, Rrecv_klass);
  // Argument and return type profiling.
  __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
  __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
}

// Virtual or final call.
3416 // Virtual or final call. Final calls are rewritten on the fly to run through "fast_invokevfinal" next time.
3417 void TemplateTable::invokevirtual(int byte_no) {
3418   transition(vtos, vtos);
3419
3420   Register Rtable_addr = R11_scratch1,
3421            Rret_type = R12_scratch2,
3422            Rret_addr = R5_ARG3,
3423            Rflags = R22_tmp2,                  // Should survive C call.
3424            Rrecv = R3_ARG1,
3425            Rrecv_klass = Rrecv,
3426            Rvtableindex_or_method = R31,       // Should survive C call.
3427            Rnum_params = R4_ARG2,
3428            Rnew_bc = R6_ARG4;
3429
3430   Label LnotFinal;
3431
3432   load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
3433
3434   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3435   __ bfalse(CCR0, LnotFinal);
3436
3437   if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) {
3438     patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
3439   }
3440   invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
3441
3442   __ align(32, 12);
3443   __ bind(LnotFinal);
3444   // Load "this" pointer (receiver).
3445   __ rldicl(Rnum_params, Rflags, 64, 48);
3446   __ load_receiver(Rnum_params, Rrecv);
3447   __ verify_oop(Rrecv);
3448
3449   // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3450   __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3451   __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3452   __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3453   __ ldx(Rret_addr, Rret_type, Rtable_addr);
3454   __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
3455   __ load_klass(Rrecv_klass, Rrecv);
3456   __ verify_klass_ptr(Rrecv_klass);
3457   __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
3458
3459   generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
3460 }
3461
3462 void TemplateTable::fast_invokevfinal(int byte_no) {
3463   transition(vtos, vtos);
3464
3465   assert(byte_no == f2_byte, "use this argument");
3466   Register Rflags = R22_tmp2,
3467            Rmethod = R31;
3468   load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
3469   invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
3470 }
3471
3472 void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {
3473
3474   assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);
3475
3476   // Load receiver from stack slot.
3477   Register Rrecv = Rscratch2;
3478   Register Rnum_params = Rrecv;
3479
3480   __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
3481   __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);
3482
3483   // Get return address.
3484   Register Rtable_addr = Rscratch1,
3485            Rret_addr = Rflags,
3486            Rret_type = Rret_addr;
3487   // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3488   __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3489   __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3490   __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3491   __ ldx(Rret_addr, Rret_type, Rtable_addr);
3492
3493   // Load receiver and receiver NULL check.
3494   __ load_receiver(Rnum_params, Rrecv);
3495   __ null_check_throw(Rrecv, -1, Rscratch1);
3496
3497   __ profile_final_call(Rrecv, Rscratch1);
3498   // Argument and return type profiling.
3499   __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
3500
3501   // Do the call.
3502   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
3503 }
3504
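// ----------------------------------------------------------------------------
// [Editor's note] A minimal C++ sketch (not interpreter code) of the
// return-address selection used in invokevfinal_helper() above and in
// prepare_invoke(): rldicl extracts the TosState from the cache-entry flags,
// sldi scales it to a word index, and ldx picks the matching entry out of the
// invoke return entry table. The shift/width values are hypothetical
// stand-ins for ConstantPoolCacheEntry::tos_state_shift/tos_state_bits.
static const void* sketch_select_return_entry(unsigned long long flags,
                                              const void* const* return_table) {
  const int kTosStateShift = 28; // assumption, ConstantPoolCacheEntry::tos_state_shift
  const int kTosStateBits  = 4;  // assumption, ConstantPoolCacheEntry::tos_state_bits
  unsigned long long ret_type = (flags >> kTosStateShift) & ((1ULL << kTosStateBits) - 1); // rldicl
  return return_table[ret_type]; // sldi + ldx
}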
3505 void TemplateTable::invokespecial(int byte_no) {
3506   assert(byte_no == f1_byte, "use this argument");
3507   transition(vtos, vtos);
3508
3509   Register Rtable_addr = R3_ARG1,
3510            Rret_addr = R4_ARG2,
3511            Rflags = R5_ARG3,
3512            Rreceiver = R6_ARG4,
3513            Rmethod = R31;
3514
3515   prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);
3516
3517   // Receiver NULL check.
3518   __ null_check_throw(Rreceiver, -1, R11_scratch1);
3519
3520   __ profile_call(R11_scratch1, R12_scratch2);
3521   // Argument and return type profiling.
3522   __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
3523   __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
3524 }
3525
3526 void TemplateTable::invokestatic(int byte_no) {
3527   assert(byte_no == f1_byte, "use this argument");
3528   transition(vtos, vtos);
3529
3530   Register Rtable_addr = R3_ARG1,
3531            Rret_addr = R4_ARG2,
3532            Rflags = R5_ARG3;
3533
3534   prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
3535
3536   __ profile_call(R11_scratch1, R12_scratch2);
3537   // Argument and return type profiling.
3538   __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
3539   __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
3540 }
3541
3542 void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
3543                                                   Register Rret,
3544                                                   Register Rflags,
3545                                                   Register Rmethod,
3546                                                   Register Rtemp1,
3547                                                   Register Rtemp2) {
3548
3549   assert_different_registers(Rmethod, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
3550   Label LnotFinal;
3551
3552   // Check for vfinal.
3553   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3554   __ bfalse(CCR0, LnotFinal);
3555
3556   Register Rscratch = Rflags; // Rflags is dead now.
3557
3558   // Final call case.
3559   __ profile_final_call(Rtemp1, Rscratch);
3560   // Argument and return type profiling.
3561   __ profile_arguments_type(Rmethod, Rscratch, Rrecv_klass /* scratch */, true);
3562   // Do the final call - the index (f2) contains the method.
3563   __ call_from_interpreter(Rmethod, Rret, Rscratch, Rrecv_klass /* scratch */);
3564
3565   // Non-final call case.
3566   __ bind(LnotFinal);
3567   __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
3568   generate_vtable_call(Rrecv_klass, Rmethod, Rret, Rscratch);
3569 }
3570
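// ----------------------------------------------------------------------------
// [Editor's note] A minimal C++ sketch (not interpreter code) of the flag test
// that testbitdi performs in invokeinterface_object_method() above and in
// invokevirtual(): a single bit of the cache-entry flags selects the vfinal
// (final-method) path. The shift value is a hypothetical stand-in for
// ConstantPoolCacheEntry::is_vfinal_shift.
static bool sketch_is_vfinal(unsigned long long flags) {
  const int kIsVfinalShift = 20; // assumption, ConstantPoolCacheEntry::is_vfinal_shift
  return ((flags >> kIsVfinalShift) & 1) != 0; // testbitdi + btrue/bfalse
}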
3571 void TemplateTable::invokeinterface(int byte_no) {
3572   assert(byte_no == f1_byte, "use this argument");
3573   transition(vtos, vtos);
3574
3575   const Register Rscratch1 = R11_scratch1,
3576                  Rscratch2 = R12_scratch2,
3577                  Rmethod = R6_ARG4,
3578                  Rmethod2 = R9_ARG7,
3579                  Rinterface_klass = R5_ARG3,
3580                  Rret_addr = R8_ARG6,
3581                  Rindex = R10_ARG8,
3582                  Rreceiver = R3_ARG1,
3583                  Rrecv_klass = R4_ARG2,
3584                  Rflags = R7_ARG5;
3585
3586   prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rmethod, Rreceiver, Rflags, Rscratch1);
3587
3588   // Get receiver klass.
3589   __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch2);
3590   __ load_klass(Rrecv_klass, Rreceiver);
3591
3592   // Check corner case object method.
3593   Label LobjectMethod, L_no_such_interface, Lthrow_ame;
3594   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
3595   __ btrue(CCR0, LobjectMethod);
3596
3597   __ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2,
3598                              L_no_such_interface, /*return_method=*/false);
3599
3600   __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
3601
3602   // Find entry point to call.
3603
3604   // Get declaring interface class from method.
3605   __ ld(Rinterface_klass, in_bytes(Method::const_offset()), Rmethod);
3606   __ ld(Rinterface_klass, in_bytes(ConstMethod::constants_offset()), Rinterface_klass);
3607   __ ld(Rinterface_klass, ConstantPool::pool_holder_offset_in_bytes(), Rinterface_klass);
3608
3609   // Get itable index from method.
3610   __ lwa(Rindex, in_bytes(Method::itable_index_offset()), Rmethod);
3611   __ subfic(Rindex, Rindex, Method::itable_index_max);
3612
3613   __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rmethod2, Rscratch1, Rscratch2,
3614                              L_no_such_interface);
3615
3616   __ cmpdi(CCR0, Rmethod2, 0);
3617   __ beq(CCR0, Lthrow_ame);
3618   // Found entry. Jump off!
3619   // Argument and return type profiling.
3620   __ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true);
3621   //__ profile_called_method(Rindex, Rscratch1);
3622   __ call_from_interpreter(Rmethod2, Rret_addr, Rscratch1, Rscratch2);
3623
3624   // Itable entry was NULL => Throw abstract method error.
3625   __ bind(Lthrow_ame);
3626   // Pass arguments for generating a verbose error message.
3627   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
3628           Rrecv_klass, Rmethod);
3629
3630   // Interface was not found => Throw incompatible class change error.
3631   __ bind(L_no_such_interface);
3632   // Pass arguments for generating a verbose error message.
3633   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
3634           Rrecv_klass, Rinterface_klass);
3635   DEBUG_ONLY( __ should_not_reach_here(); )
3636
3637   // Special case of invokeinterface called for virtual method of
3638   // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
3639   // The invokeinterface was rewritten to an invokevirtual, hence we have
3640   // to handle this corner case. This code isn't produced by javac, but could
3641   // be produced by another compliant java compiler.
3642   __ bind(LobjectMethod);
3643   invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rmethod, Rscratch1, Rscratch2);
3644 }
3645
3646 void TemplateTable::invokedynamic(int byte_no) {
3647   transition(vtos, vtos);
3648
3649   const Register Rret_addr = R3_ARG1,
3650                  Rflags = R4_ARG2,
3651                  Rmethod = R22_tmp2,
3652                  Rscratch1 = R11_scratch1,
3653                  Rscratch2 = R12_scratch2;
3654
3655   prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);
3656
3657   // Profile this call.
3658   __ profile_call(Rscratch1, Rscratch2);
3659
3660   // Off we go. With the new method handles, we don't jump to a method handle
3661   // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
3662   // to be the callsite object the bootstrap method returned. This is passed to a
3663   // "link" method which does the dispatch (most likely just grabs the MH stored
3664   // inside the callsite and does an invokehandle).
3665   // Argument and return type profiling.
3666   __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
3667   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3668 }
3669
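// ----------------------------------------------------------------------------
// [Editor's note] A minimal C++ sketch (not interpreter code) of the two flag
// fields prepare_invoke() uses above: has_appendix (a single bit, tested with
// rldicl_) decides whether the resolved reference is pushed as a trailing
// argument, and parameter_size (the low byte, masked with andi) gives the
// receiver's depth in the expression stack. Shift and mask values are
// hypothetical stand-ins for the ConstantPoolCacheEntry constants.
static bool sketch_invoke_has_appendix(unsigned long long flags) {
  const int kHasAppendixShift = 24; // assumption, ConstantPoolCacheEntry::has_appendix_shift
  return ((flags >> kHasAppendixShift) & 1) != 0; // rldicl_ + beq
}
static unsigned int sketch_invoke_parameter_size(unsigned long long flags) {
  const unsigned int kParameterSizeMask = 0xff; // assumption, ConstantPoolCacheEntry::parameter_size_mask
  return (unsigned int)(flags & kParameterSizeMask); // andi(Rparam_count, Rflags, mask)
}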
3670 void TemplateTable::invokehandle(int byte_no) {
3671   transition(vtos, vtos);
3672
3673   const Register Rret_addr = R3_ARG1,
3674                  Rflags = R4_ARG2,
3675                  Rrecv = R5_ARG3,
3676                  Rmethod = R22_tmp2,
3677                  Rscratch1 = R11_scratch1,
3678                  Rscratch2 = R12_scratch2;
3679
3680   prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
3681   __ verify_method_ptr(Rmethod);
3682   __ null_check_throw(Rrecv, -1, Rscratch2);
3683
3684   __ profile_final_call(Rrecv, Rscratch1);
3685
3686   // Still no call from handle => We call the method handle interpreter here.
3687   // Argument and return type profiling.
3688   __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
3689   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3690 }
3691
3692 // =============================================================================
3693 // Allocation
3694
3695 // Puts allocated obj ref onto the expression stack.
3696 void TemplateTable::_new() {
3697   transition(vtos, atos);
3698
3699   Label Lslow_case,
3700         Ldone;
3701
3702   const Register RallocatedObject = R17_tos,
3703                  RinstanceKlass = R9_ARG7,
3704                  Rscratch = R11_scratch1,
3705                  Roffset = R8_ARG6,
3706                  Rinstance_size = Roffset,
3707                  Rcpool = R4_ARG2,
3708                  Rtags = R3_ARG1,
3709                  Rindex = R5_ARG3;
3710
3711   // --------------------------------------------------------------------------
3712   // Check if fast case is possible.
3713
3714   // Load pointers to const pool and const pool's tags array.
3715   __ get_cpool_and_tags(Rcpool, Rtags);
3716   // Load index of constant pool entry.
3717   __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
3718
3719   // Note: compared to other architectures, PPC's implementation always goes
3720   // to the slow path if TLAB is used and fails.
3721   if (UseTLAB) {
3722     // Make sure the class we're about to instantiate has been resolved.
3723     // This is done before loading InstanceKlass to be consistent with the order
3724     // in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
3725     __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3726     __ lbzx(Rtags, Rindex, Rtags);
3727
3728     __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3729     __ bne(CCR0, Lslow_case);
3730
3731     // Get InstanceKlass.
3732     __ sldi(Roffset, Rindex, LogBytesPerWord);
3733     __ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass);
3734
3735     // Make sure klass is fully initialized and get instance_size.
3736     __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
3737     __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);
3738
3739     __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
3740     // Make sure klass has no finalizer, is not abstract, not an interface, and not java/lang/Class.
3741     __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
3742
3743     __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized?
3744     __ beq(CCR0, Lslow_case);
3745
3746     // --------------------------------------------------------------------------
3747     // Fast case:
3748     // Allocate the instance.
3749     // 1) Try to allocate in the TLAB.
3750     // 2) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
3751
3752     Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
3753     Register RnewTopValue = R6_ARG4;
3754     Register RendValue = R7_ARG5;
3755
3756     // Check if we can allocate in the TLAB.
3757     __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3758     __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);
3759
3760     __ add(RnewTopValue, Rinstance_size, RoldTopValue);
3761
3762     // If there is enough space, we do not CAS and do not clear.
3763     __ cmpld(CCR0, RnewTopValue, RendValue);
3764     __ bgt(CCR0, Lslow_case);
3765
3766     __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3767
3768     if (!ZeroTLAB) {
3769       // --------------------------------------------------------------------------
3770       // Init1: Zero out newly allocated memory.
3771       // Initialize remaining object fields.
3772       Register Rbase = Rtags;
3773       __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
3774       __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
3775       __ srdi(Rinstance_size, Rinstance_size, 3);
3776
3777       // Clear out object skipping header. Also takes care of the zero-length case.
3778       __ clear_memory_doubleword(Rbase, Rinstance_size);
3779     }
3780
3781     // --------------------------------------------------------------------------
3782     // Init2: Initialize the header: mark, klass
3783     // Init mark.
3784     if (UseBiasedLocking) {
3785       __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
3786     } else {
3787       __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
3788     }
3789     __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
3790
3791     // Init klass.
3792     __ store_klass_gap(RallocatedObject);
3793     __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
3794
3795     // Check and trigger dtrace event.
3796     SkipIfEqualZero::skip_to_label_if_equal_zero(_masm, Rscratch, &DTraceAllocProbes, Ldone);
3797     __ push(atos);
3798     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
3799     __ pop(atos);
3800
3801     __ b(Ldone);
3802   }
3803
3804   // --------------------------------------------------------------------------
3805   // slow case
3806   __ bind(Lslow_case);
3807   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
3808
3809   // continue
3810   __ bind(Ldone);
3811
3812   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3813   __ membar(Assembler::StoreStore);
3814 }
3815
3816 void TemplateTable::newarray() {
3817   transition(itos, atos);
3818
3819   __ lbz(R4, 1, R14_bcp);
3820   __ extsw(R5, R17_tos);
3821   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);
3822
3823   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3824   __ membar(Assembler::StoreStore);
3825 }
3826
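// ----------------------------------------------------------------------------
// [Editor's note] A minimal C++ sketch (not interpreter code) of the TLAB fast
// path in _new() above: a bump-pointer allocation with an unsigned limit
// compare (cmpld/bgt) and no CAS; on overflow the interpreter takes the slow
// path instead of retrying. The struct is a hypothetical stand-in for the
// JavaThread tlab_top/tlab_end slots read and written above.
struct SketchTlab { char* top; char* end; }; // assumption, models tlab_top/tlab_end
static char* sketch_tlab_allocate(SketchTlab* tlab, unsigned long instance_size) {
  char* old_top = tlab->top;                 // ld tlab_top  (RoldTopValue)
  char* new_top = old_top + instance_size;   // add          (RnewTopValue)
  if (new_top > tlab->end) return nullptr;   // cmpld/bgt -> Lslow_case
  tlab->top = new_top;                       // std tlab_top
  return old_top;                            // object starts at the old top
}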
3827 void TemplateTable::anewarray() {
3828   transition(itos, atos);
3829
3830   __ get_constant_pool(R4);
3831   __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
3832   __ extsw(R6, R17_tos); // size
3833   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);
3834
3835   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3836   __ membar(Assembler::StoreStore);
3837 }
3838
3839 // Allocate a multi-dimensional array.
3840 void TemplateTable::multianewarray() {
3841   transition(vtos, atos);
3842
3843   Register Rptr = R31; // Needs to survive C call.
3844
3845   // Put ndims * wordSize into Rptr.
3846   __ lbz(Rptr, 3, R14_bcp);
3847   __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
3848   // Esp points past last_dim, so set R4 to first_dim address.
3849   __ add(R4, Rptr, R15_esp);
3850   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
3851   // Pop all dimensions off the stack.
3852   __ add(R15_esp, Rptr, R15_esp);
3853
3854   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3855   __ membar(Assembler::StoreStore);
3856 }
3857
3858 void TemplateTable::arraylength() {
3859   transition(atos, itos);
3860
3861
3862   __ verify_oop(R17_tos);
3863   __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
3864   __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
3865 }
3866
3867 // ============================================================================
3868 // Typechecks
3869
3870 void TemplateTable::checkcast() {
3871   transition(atos, atos);
3872
3873   Label Ldone, Lis_null, Lquicked, Lresolved;
3874   Register Roffset = R6_ARG4,
3875            RobjKlass = R4_ARG2,
3876            RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
3877            Rcpool = R11_scratch1,
3878            Rtags = R12_scratch2;
3879
3880   // Null does not pass.
3881   __ cmpdi(CCR0, R17_tos, 0);
3882   __ beq(CCR0, Lis_null);
3883
3884   // Get constant pool tag to find out if the bytecode has already been "quickened".
3885   __ get_cpool_and_tags(Rcpool, Rtags);
3886
3887   __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3888
3889   __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3890   __ lbzx(Rtags, Rtags, Roffset);
3891
3892   __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3893   __ beq(CCR0, Lquicked);
3894
3895   // Call into the VM to "quicken" the bytecode (shared instanceof/checkcast entry).
3896   __ push_ptr(); // for GC
3897   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3898   __ get_vm_result_2(RspecifiedKlass);
3899   __ pop_ptr(); // Restore receiver.
3900   __ b(Lresolved);
3901
3902   // Extract target class from constant pool.
3903   __ bind(Lquicked);
3904   __ sldi(Roffset, Roffset, LogBytesPerWord);
3905   __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);
3906
3907   // Do the checkcast.
3908   __ bind(Lresolved);
3909   // Get value klass in RobjKlass.
3910   __ load_klass(RobjKlass, R17_tos);
3911   // Generate a fast subtype check. Branches to Ldone if the check succeeds; on failure we fall through and throw.
3912   __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3913
3914   // Not a subtype; so must throw exception.
3915   // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
3916   __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
3917   __ mtctr(R11_scratch1);
3918   __ bctr();
3919
3920   // Profile the null case.
3921   __ align(32, 12);
3922   __ bind(Lis_null);
3923   __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.
3924
3925   __ align(32, 12);
3926   __ bind(Ldone);
3927 }
3928
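// ----------------------------------------------------------------------------
// [Editor's note] A minimal C++ sketch (not interpreter code) of the
// "already quickened?" test in checkcast() above (instanceof below uses the
// same pattern): the tag byte of the constant-pool entry named by the bytecode
// is compared against JVM_CONSTANT_Class. The base offset is a hypothetical
// stand-in for Array<u1>::base_offset_in_bytes(); the tag value 7 is the
// class-file constant for JVM_CONSTANT_Class.
static bool sketch_cp_entry_is_quickened(const unsigned char* tags, unsigned int cp_index) {
  const int kTagsBaseOffset = 8;          // assumption, Array<u1>::base_offset_in_bytes()
  const unsigned char kConstantClass = 7; // JVM_CONSTANT_Class
  return tags[kTagsBaseOffset + cp_index] == kConstantClass; // addi + lbzx + cmpdi
}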
3929 // Output:
3930 //   - tos == 0: Obj was null or not an instance of class.
3931 //   - tos == 1: Obj was an instance of class.
3932 void TemplateTable::instanceof() {
3933   transition(atos, itos);
3934
3935   Label Ldone, Lis_null, Lquicked, Lresolved;
3936   Register Roffset = R6_ARG4,
3937            RobjKlass = R4_ARG2,
3938            RspecifiedKlass = R5_ARG3,
3939            Rcpool = R11_scratch1,
3940            Rtags = R12_scratch2;
3941
3942   // Null does not pass.
3943   __ cmpdi(CCR0, R17_tos, 0);
3944   __ beq(CCR0, Lis_null);
3945
3946   // Get constant pool tag to find out if the bytecode has already been "quickened".
3947   __ get_cpool_and_tags(Rcpool, Rtags);
3948
3949   __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3950
3951   __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3952   __ lbzx(Rtags, Rtags, Roffset);
3953
3954   __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3955   __ beq(CCR0, Lquicked);
3956
3957   // Call into the VM to "quicken" instanceof.
3958   __ push_ptr(); // for GC
3959   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3960   __ get_vm_result_2(RspecifiedKlass);
3961   __ pop_ptr(); // Restore receiver.
3962   __ b(Lresolved);
3963
3964   // Extract target class from constant pool.
3965   __ bind(Lquicked);
3966   __ sldi(Roffset, Roffset, LogBytesPerWord);
3967   __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);
3968
3969   // Do the type check.
3970   __ bind(Lresolved);
3971   // Get value klass in RobjKlass.
3972   __ load_klass(RobjKlass, R17_tos);
3973   // Generate a fast subtype check. Branches to Ldone with tos == 1 if the check succeeds; otherwise falls through and leaves tos == 0.
3974   __ li(R17_tos, 1);
3975   __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3976   __ li(R17_tos, 0);
3977
3978   if (ProfileInterpreter) {
3979     __ b(Ldone);
3980   }
3981
3982   // Profile the null case.
3983   __ align(32, 12);
3984   __ bind(Lis_null);
3985   __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.
3986
3987   __ align(32, 12);
3988   __ bind(Ldone);
3989 }
3990
3991 // =============================================================================
3992 // Breakpoints
3993
3994 void TemplateTable::_breakpoint() {
3995   transition(vtos, vtos);
3996
3997   // Get the unpatched byte code.
3998   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
3999   __ mr(R31, R3_RET);
4000
4001   // Post the breakpoint event.
4002   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);
4003
4004   // Complete the execution of original bytecode.
4005   __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
4006 }
4007
4008 // =============================================================================
4009 // Exceptions
4010
4011 void TemplateTable::athrow() {
4012   transition(atos, vtos);
4013
4014   // Exception oop is in tos.
4015   __ verify_oop(R17_tos);
4016
4017   __ null_check_throw(R17_tos, -1, R11_scratch1);
4018
4019   // The throw-exception interpreter entry expects the exception oop to be in R3.
4020   __ mr(R3_RET, R17_tos);
4021   __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
4022   __ mtctr(R11_scratch1);
4023   __ bctr();
4024 }
4025
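// ----------------------------------------------------------------------------
// [Editor's note] A minimal C++ sketch (not interpreter code) of the computed
// dispatch athrow() above ends with: an entry point is loaded from a table
// slot (load_dispatch_table), moved to CTR (mtctr), and jumped through (bctr),
// with the exception oop passed in R3. In C++ terms this is an indirect
// transfer through a function pointer; the typedef is hypothetical.
typedef void (*SketchEntryPoint)(void* exception_oop_in_r3);
static void sketch_computed_dispatch(SketchEntryPoint* entry_slot, void* exception_oop) {
  SketchEntryPoint entry = *entry_slot; // load_dispatch_table + mtctr
  entry(exception_oop);                 // bctr (control does not return here)
}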
4026 // =============================================================================
4027 // Synchronization
4028 // Searches the basic object lock list on the stack for a free slot
4029 // and uses it to lock the object in tos.
4030 //
4031 // Recursive locking is enabled by exiting the search if the same
4032 // object is already found in the list. Thus, a new basic lock obj lock
4033 // is allocated "higher up" in the stack and thus is found first
4034 // at next monitor exit.
4035 void TemplateTable::monitorenter() {
4036   transition(atos, vtos);
4037
4038   __ verify_oop(R17_tos);
4039
4040   Register Rcurrent_monitor = R11_scratch1,
4041            Rcurrent_obj = R12_scratch2,
4042            Robj_to_lock = R17_tos,
4043            Rscratch1 = R3_ARG1,
4044            Rscratch2 = R4_ARG2,
4045            Rscratch3 = R5_ARG3,
4046            Rcurrent_obj_addr = R6_ARG4;
4047
4048   // ------------------------------------------------------------------------------
4049   // Null pointer exception.
4050   __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
4051
4052   // Try to acquire a lock on the object.
4053   // Repeat until succeeded (i.e., until monitorenter returns true).
4054
4055   // ------------------------------------------------------------------------------
4056   // Find a free slot in the monitor block.
4057   Label Lfound, Lexit, Lallocate_new;
4058   ConditionRegister found_free_slot = CCR0,
4059                     found_same_obj = CCR1,
4060                     reached_limit = CCR6;
4061   {
4062     Label Lloop;
4063     Register Rlimit = Rcurrent_monitor;
4064
4065     // Set up search loop - start with topmost monitor.
4066     __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4067
4068     __ ld(Rlimit, 0, R1_SP);
4069     __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base
4070
4071     // Check if any slot is present => short cut to allocation if not.
4072     __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
4073     __ bgt(reached_limit, Lallocate_new);
4074
4075     // Pre-load topmost slot.
4076     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4077     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4078     // The search loop.
4079     __ bind(Lloop);
4080     // Found free slot?
4081     __ cmpdi(found_free_slot, Rcurrent_obj, 0);
4082     // Is this entry for same obj? If so, stop the search and take the found
4083     // free slot or allocate a new one to enable recursive locking.
4084     __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
4085     __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
4086     __ beq(found_free_slot, Lexit);
4087     __ beq(found_same_obj, Lallocate_new);
4088     __ bgt(reached_limit, Lallocate_new);
4089     // Check if last allocated BasicLockObj reached.
4090     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4091     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4092     // Next iteration if unchecked BasicObjectLocks exist on the stack.
4093     __ b(Lloop);
4094   }
4095
4096   // ------------------------------------------------------------------------------
4097   // Check if we found a free slot.
4098   __ bind(Lexit);
4099
4100   __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4101   __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
4102   __ b(Lfound);
4103
4104   // We didn't find a free BasicObjLock => allocate one.
4105   __ align(32, 12);
4106   __ bind(Lallocate_new);
4107   __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
4108   __ mr(Rcurrent_monitor, R26_monitor);
4109   __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4110
4111   // ------------------------------------------------------------------------------
4112   // We now have a slot to lock.
4113   __ bind(Lfound);
4114
4115   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
4116   // The object has already been popped from the stack, so the expression stack looks correct.
4117   __ addi(R14_bcp, R14_bcp, 1);
4118
4119   __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
4120   __ lock_object(Rcurrent_monitor, Robj_to_lock);
4121
4122   // Check if there's enough space on the stack for the monitors after locking.
4123   // This emits a single store.
4124   __ generate_stack_overflow_check(0);
4125
4126   // The bcp has already been incremented. Just need to dispatch to next instruction.
4127   __ dispatch_next(vtos);
4128 }
4129
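// ----------------------------------------------------------------------------
// [Editor's note] A minimal C++ sketch (not interpreter code) of the free-slot
// search in monitorenter() above: the obj slots of the frame's monitor area
// are walked from the topmost monitor towards the monitor base, stopping on a
// free slot, or giving up early when the same object is seen (so a fresh slot
// is allocated "higher up" for recursive locking) or the limit is crossed.
// The stride is a hypothetical stand-in for
// frame::interpreter_frame_monitor_size() * wordSize, expressed in obj-slot units.
static void** sketch_find_free_monitor_slot(void** top_slot, void** limit_slot, void* obj) {
  const int kSlotStride = 2; // assumption, monitor size in pointer-sized slots
  for (void** slot = top_slot; slot <= limit_slot; slot += kSlotStride) {
    if (*slot == nullptr) return slot; // found_free_slot -> Lexit
    if (*slot == obj)     break;       // found_same_obj  -> Lallocate_new
  }
  return nullptr;                      // reached_limit   -> Lallocate_new
}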
4130 void TemplateTable::monitorexit() {
4131   transition(atos, vtos);
4132   __ verify_oop(R17_tos);
4133
4134   Register Rcurrent_monitor = R11_scratch1,
4135            Rcurrent_obj = R12_scratch2,
4136            Robj_to_lock = R17_tos,
4137            Rcurrent_obj_addr = R3_ARG1,
4138            Rlimit = R4_ARG2;
4139   Label Lfound, Lillegal_monitor_state;
4140
4141   // Check corner case: unbalanced monitorEnter / Exit.
4142   __ ld(Rlimit, 0, R1_SP);
4143   __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
4144
4145   // Null pointer check.
4146   __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
4147
4148   __ cmpld(CCR0, R26_monitor, Rlimit);
4149   __ bgt(CCR0, Lillegal_monitor_state);
4150
4151   // Find the corresponding slot in the monitors stack section.
4152   {
4153     Label Lloop;
4154
4155     // Start with topmost monitor.
4156     __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4157     __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
4158     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4159     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4160
4161     __ bind(Lloop);
4162     // Is this entry for same obj?
4163     __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
4164     __ beq(CCR0, Lfound);
4165
4166     // Check if last allocated BasicLockObj reached.
4167
4168     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4169     __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
4170     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4171
4172     // Next iteration if unchecked BasicObjectLocks exist on the stack.
4173     __ ble(CCR0, Lloop);
4174   }
4175
4176   // Fell through without finding the basic obj lock => throw up!
4177   __ bind(Lillegal_monitor_state);
4178   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4179   __ should_not_reach_here();
4180
4181   __ align(32, 12);
4182   __ bind(Lfound);
4183   __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
4184           -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4185   __ unlock_object(Rcurrent_monitor);
4186 }
4187
4188 // ============================================================================
4189 // Wide bytecodes
4190
4191 // Wide instructions. Simply redirects to the wide entry point for that instruction.
4192 void TemplateTable::wide() {
4193   transition(vtos, vtos);
4194
4195   const Register Rtable = R11_scratch1,
4196                  Rindex = R12_scratch2,
4197                  Rtmp = R0;
4198
4199   __ lbz(Rindex, 1, R14_bcp);
4200
4201   __ load_dispatch_table(Rtable, Interpreter::_wentry_point);
4202
4203   __ slwi(Rindex, Rindex, LogBytesPerWord);
4204   __ ldx(Rtmp, Rtable, Rindex);
4205   __ mtctr(Rtmp);
4206   __ bctr();
4207   // Note: the bcp increment step is part of the individual wide bytecode implementations.
4208 }
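// ----------------------------------------------------------------------------
// [Editor's note] A minimal C++ sketch (not interpreter code) of the dispatch
// in wide() above: the byte following the wide opcode indexes a secondary
// entry-point table (Interpreter::_wentry_point in the code above, modeled
// here as a plain function-pointer array), and control transfers through CTR.
typedef void (*SketchWideEntry)();
static void sketch_dispatch_wide(const unsigned char* bcp, SketchWideEntry const* wentry_point) {
  unsigned int sub_opcode = bcp[1];                 // lbz(Rindex, 1, R14_bcp)
  SketchWideEntry entry = wentry_point[sub_opcode]; // slwi + ldx
  entry();                                          // mtctr + bctr
}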