/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants is possible at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,         // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1BarrierSet:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /* Rval must stay uncompressed. */ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
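          // A null store can never create a cross-region or old-to-young
          // reference, so no post barrier is needed here; only a raw
          // (possibly compressed) zero is written below.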
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableBarrierSet:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /* Rval should stay uncompressed. */ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
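      // The cache entry's _indices word appears to pack the constant pool
      // index in bytes 0..1 and the resolved bytecodes in bytes 2
      // (bytecode_1) and 3 (bytecode_2), counting little-endian. With
      // byte_no in {f1_byte = 1, f2_byte = 2}, offset (1 + byte_no)
      // selects the wanted code byte; big endian mirrors the offset
      // within the 8-byte word.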
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
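  // Push the single byte operand sign-extended to an int: lbz zero-extends,
  // extsb then propagates bit 7, so e.g. operand 0xFF pushes -1.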
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notFloat, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass);        // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);

  // Resolved class - need to call the VM to get the java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ bne(CCR0, notFloat);
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);
  __ b(exit);

  __ align(32, 12);
  // Assume the tag is for condy; if not, the VM runtime will tell us.
  __ bind(notFloat);
  condy_helper(exit);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label is_null;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.).
  __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null);

  // Convert null sentinel to NULL.
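  // The resolved-references array presumably uses a sentinel object to mark
  // a condy entry whose resolved value is actually null (a genuinely null
  // slot means "not yet resolved"), so compare against the sentinel and
  // replace it with a real null before publishing the result.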
  int simm16_rest = __ load_const_optimized(Rscratch, Universe::the_null_sentinel_addr(), R0, true);
  __ ld(Rscratch, simm16_rest, Rscratch);
  __ cmpld(CCR0, R17_tos, Rscratch);
  if (VM_Version::has_isel()) {
    __ isel_0(R17_tos, CCR0, Assembler::equal);
  } else {
    Label not_sentinel;
    __ bne(CCR0, not_sentinel);
    __ li(R17_tos, 0);
    __ bind(not_sentinel);
  }
  __ verify_oop(R17_tos);
  __ dispatch_epilog(atos, Bytecodes::length_for(bytecode()));

  __ bind(is_null);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First-time invocation - must resolve first.
  __ call_VM(R17_tos, entry, R3_ARG1);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label not_double, not_long, exit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);
  __ sldi(Rindex, Rindex, LogBytesPerWord);

  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, not_double);
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(exit);

  __ bind(not_double);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Long);
  __ bne(CCR0, not_long);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);
  __ b(exit);

  __ bind(not_long);
  condy_helper(exit);

  __ align(32, 12);
  __ bind(exit);
}

void TemplateTable::condy_helper(Label& Done) {
  const Register obj   = R31;
  const Register off   = R11_scratch1;
  const Register flags = R12_scratch2;
  const Register rarg  = R4_ARG2;
  __ li(rarg, (int)bytecode());
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
  __ get_vm_result_2(flags);

  // VMr  = obj   = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ andi(off, flags, ConstantPoolCacheEntry::field_index_mask);

  // What sort of thing are we loading?
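  // The rldicl below is an unsigned bit-field extract: rotate the tos state
  // field down to bit 0 and clear everything above it, i.e.
  // flags = (flags >> tos_state_shift) & ((1 << tos_state_bits) - 1).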
  __ rldicl(flags, flags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

  switch (bytecode()) {
    case Bytecodes::_ldc:
    case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmplwi(CCR0, flags, itos);
      __ bne(CCR0, notInt);
      // itos
      __ lwax(R17_tos, obj, off);
      __ push(itos);
      __ b(Done);

      __ bind(notInt);
      __ cmplwi(CCR0, flags, ftos);
      __ bne(CCR0, notFloat);
      // ftos
      __ lfsx(F15_ftos, obj, off);
      __ push(ftos);
      __ b(Done);

      __ bind(notFloat);
      __ cmplwi(CCR0, flags, stos);
      __ bne(CCR0, notShort);
      // stos
      __ lhax(R17_tos, obj, off);
      __ push(stos);
      __ b(Done);

      __ bind(notShort);
      __ cmplwi(CCR0, flags, btos);
      __ bne(CCR0, notByte);
      // btos
      __ lbzx(R17_tos, obj, off);
      __ extsb(R17_tos, R17_tos);
      __ push(btos);
      __ b(Done);

      __ bind(notByte);
      __ cmplwi(CCR0, flags, ctos);
      __ bne(CCR0, notChar);
      // ctos
      __ lhzx(R17_tos, obj, off);
      __ push(ctos);
      __ b(Done);

      __ bind(notChar);
      __ cmplwi(CCR0, flags, ztos);
      __ bne(CCR0, notBool);
      // ztos
      __ lbzx(R17_tos, obj, off);
      __ push(ztos);
      __ b(Done);

      __ bind(notBool);
      break;
    }

    case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmplwi(CCR0, flags, ltos);
      __ bne(CCR0, notLong);
      // ltos
      __ ldx(R17_tos, obj, off);
      __ push(ltos);
      __ b(Done);

      __ bind(notLong);
      __ cmplwi(CCR0, flags, dtos);
      __ bne(CCR0, notDouble);
      // dtos
      __ lfdx(F15_ftos, obj, off);
      __ push(dtos);
      __ b(Done);

      __ bind(notDouble);
      break;
    }

    default:
      ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);

  // Get the local value into tos.
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite the iload,iload pair into fast_iload2 and the
  // iload,caload pair into fast_icaload.
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // If _iload, wait to rewrite to fast_iload2. We only want to rewrite
    // the last two iloads in a pair. Seeing fast_iload next means the
    // following bytecode was itself an iload (already rewritten), so this
    // is the leading half of an iload pair.
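    // Example: for "iload; iload; <other>" the trailing iload is first
    // rewritten to fast_iload (the default case below); once the leading
    // iload sees fast_iload as its successor, it becomes fast_iload2,
    // which loads both locals with a single dispatch.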
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching.
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable of type long from the locals area into the TOS
// cache register. The local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because Lbcp points to the wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: array element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// iload followed by caload: a frequent pair.
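// fast_icaload fuses the two templates: it reads the iload's local index
// from the bytecode stream itself, loads that int local as the array
// index, and performs the caload in the same template, saving one
// bytecode dispatch.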
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes are most profitable to rewrite given the small
  // amount of code involved.

  if (RewriteFrequentPairs && rc == may_rewrite) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
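    // Example: "aload_0; getfield #f" stays unrewritten until the getfield
    // itself has been resolved and quickened to a _fast_Xgetfield; only
    // then can aload_0 pick the matching _fast_Xaccess_0 pair bytecode.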
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do the actual aload_0. This must happen after patch_bytecode, which
  // might call the VM, and a GC might change the oop.
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack (value, index, array) and store the value.
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31; // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do the array store check - check for a null value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
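  // The expression stack presumably grows toward lower addresses (pushes
  // decrement R15_esp), so popping the three slots consumed above is a
  // single positive addi on R15_esp.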
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  __ pop_ptr(Rarray);
  // tos: val

  // Need to check whether the array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(Rscratch, Rarray);
  __ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch);
  int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit());
  __ testbitdi(CCR0, R0, Rscratch, diffbit);
  Label L_skip;
  __ bfalse(CCR0, L_skip);
  __ andi(R17_tos, R17_tos, 1); // If it is a T_BOOLEAN array, mask the stored value to 0/1.
  __ bind(L_skip);

  __ index_check_without_pop(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);     // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp); // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // tos      = number of bits to shift
  // Rscratch = value to shift
  switch (op) {
    case add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case mul:  __ mullw(R17_tos, Rscratch, R17_tos); break;
    case _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    case shl:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case shr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1 (the -1 case was handled above).
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minlong/-1 (the -1 case was handled above).
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
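  // Per the JVM spec, long shifts use only the low 6 bits of the shift
  // count (the int shifts in iop2 mask to 5), which is exactly what the
  // rldicl mask implements.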
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);           // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.
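  // load_local_int returns both the value and the slot address, so the
  // incremented value can be stored straight back through Rindex below
  // without recomputing the locals offset.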

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
      // fall through
    case Bytecodes::_l2d:
      __ move_l_to_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ move_l_to_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ move_l_to_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
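        // Converting long->double (fcfid) and then double->float (frsp)
        // rounds twice and can differ from a single correctly rounded
        // long->float conversion, hence the call into the runtime.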
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ move_d_to_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ move_d_to_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes a TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
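  // Outline: read the (wide) signed displacement, handle jsr specially,
  // take the branch by bumping R14_bcp, and for backward branches bump
  // the backedge counters, possibly triggering OSR compilation below.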
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in Otos_i.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos, 0, true);
    return;
  }

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  // Bump bytecode pointer by displacement (take the branch).
  __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    Label Lforward;

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        if (UseOnStackReplacement) {
          __ and_(Rscratch3, Rscratch2, Rscratch3);
          __ bne(CCR0, Lforward);
          __ b(Loverflow);
        } else {
          __ b(Lforward);
        }
      }

      // If there's no MDO, increment the counter in the method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters);
      if (UseOnStackReplacement) {
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
      } else {
        __ b(Lforward);
      }
      __ bind(Loverflow);

      // Notify point for loop, pass branch bytecode.
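      // frequency_counter_overflow returns the osr nmethod in R3_RET if an
      // OSR-compiled version is available for this bci, or null otherwise;
      // the checks below act on that result.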
      __ subf(R4_ARG2, Rdisp, R14_bcp); // Compute branch bytecode (previous bcp).
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

      // Was an OSR adapter generated?
      __ cmpdi(CCR0, R3_RET, 0);
      __ beq(CCR0, Lforward);

      // Has the nmethod been invalidated already?
      __ lbz(R0, nmethod::state_offset(), R3_RET);
      __ cmpwi(CCR0, R0, nmethod::in_use);
      __ bne(CCR0, Lforward);

      // Migrate the interpreter frame off of the stack.
      // We can use all registers because we will not return to interpreter from this point.

      // Save nmethod.
      const Register osr_nmethod = R31;
      __ mr(osr_nmethod, R3_RET);
      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
      __ reset_last_Java_frame();
      // OSR buffer is in ARG1.

      // Remove the interpreter frame.
      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

      // Jump to the osr code.
      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
      __ mtlr(R0);
      __ mtctr(R11_scratch1);
      __ bctr();

    } else {

      const Register invoke_ctr = Rscratch1;
      // Update backedge counter separately from invocations.
      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);

      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(invoke_ctr, R4_counters, Rscratch2, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(bumped_count, R4_counters, R14_bcp, Rdisp, Rscratch2);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(invoke_ctr, R4_counters, R14_bcp, Rdisp, Rscratch2);
        }
      }
    }

    __ bind(Lforward);
  }
  __ dispatch_next(vtos, 0, true);
}

// Helper function for if_cmp* methods below.
// Factored out common compare and branch code.
void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
  Label Lnot_taken;
  // Note: The condition code we get is the condition under which we
  // *fall through*! So we have to invert the CC here.

  if (is_jint) {
    if (cmp0) {
      __ cmpwi(CCR0, Rfirst, 0);
    } else {
      __ cmpw(CCR0, Rfirst, Rsecond);
    }
  } else {
    if (cmp0) {
      __ cmpdi(CCR0, Rfirst, 0);
    } else {
      __ cmpd(CCR0, Rfirst, Rsecond);
    }
  }
  branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);

  // Condition is true => take the branch!
  branch(false, false);

  // Condition is false => continue with the next bytecode.
  __ align(32, 12);
  __ bind(Lnot_taken);
  __ profile_not_taken_branch(Rscratch1, Rscratch2);
}

// Compare integer values with zero and fall through if CC holds, branch away otherwise.
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
}

// Compare integer values and fall through if CC holds, branch away otherwise.
1912 // 1913 // Interface: 1914 // - Rfirst: First operand (older stack value) 1915 // - tos: Second operand (younger stack value) 1916 void TemplateTable::if_icmp(Condition cc) { 1917 transition(itos, vtos); 1918 1919 const Register Rfirst = R0, 1920 Rsecond = R17_tos; 1921 1922 __ pop_i(Rfirst); 1923 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false); 1924 } 1925 1926 void TemplateTable::if_nullcmp(Condition cc) { 1927 transition(atos, vtos); 1928 1929 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true); 1930 } 1931 1932 void TemplateTable::if_acmp(Condition cc) { 1933 transition(atos, vtos); 1934 1935 const Register Rfirst = R0, 1936 Rsecond = R17_tos; 1937 1938 __ pop_ptr(Rfirst); 1939 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false); 1940 } 1941 1942 void TemplateTable::ret() { 1943 locals_index(R11_scratch1); 1944 __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1); 1945 1946 __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2); 1947 1948 __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method); 1949 __ add(R11_scratch1, R17_tos, R11_scratch1); 1950 __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset())); 1951 __ dispatch_next(vtos, 0, true); 1952 } 1953 1954 void TemplateTable::wide_ret() { 1955 transition(vtos, vtos); 1956 1957 const Register Rindex = R3_ARG1, 1958 Rscratch1 = R11_scratch1, 1959 Rscratch2 = R12_scratch2; 1960 1961 locals_index_wide(Rindex); 1962 __ load_local_ptr(R17_tos, R17_tos, Rindex); 1963 __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2); 1964 // Tos now contains the bci, compute the bcp from that. 1965 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method); 1966 __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset())); 1967 __ add(R14_bcp, Rscratch1, Rscratch2); 1968 __ dispatch_next(vtos, 0, true); 1969 } 1970 1971 void TemplateTable::tableswitch() { 1972 transition(itos, vtos); 1973 1974 Label Ldispatch, Ldefault_case; 1975 Register Rlow_byte = R3_ARG1, 1976 Rindex = Rlow_byte, 1977 Rhigh_byte = R4_ARG2, 1978 Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset 1979 Rscratch1 = R11_scratch1, 1980 Rscratch2 = R12_scratch2, 1981 Roffset = R6_ARG4; 1982 1983 // Align bcp. 1984 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1985 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1986 1987 // Load lo & hi. 1988 __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1989 __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned); 1990 1991 // Check for default case (=index outside [low,high]). 1992 __ cmpw(CCR0, R17_tos, Rlow_byte); 1993 __ cmpw(CCR1, R17_tos, Rhigh_byte); 1994 __ blt(CCR0, Ldefault_case); 1995 __ bgt(CCR1, Ldefault_case); 1996 1997 // Lookup dispatch offset. 
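  // The aligned table begins with three 4-byte words (default offset, low,
  // high), so the offset for a matching key lives at
  //   aligned_bcp + (3 + (key - low)) * BytesPerInt,
  // which is exactly what the subtraction, shift and addition below compute
  // before the indexed load.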
1998 __ sub(Rindex, R17_tos, Rlow_byte); 1999 __ extsw(Rindex, Rindex); 2000 __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2); 2001 __ sldi(Rindex, Rindex, LogBytesPerInt); 2002 __ addi(Rindex, Rindex, 3 * BytesPerInt); 2003 #if defined(VM_LITTLE_ENDIAN) 2004 __ lwbrx(Roffset, Rdef_offset_addr, Rindex); 2005 __ extsw(Roffset, Roffset); 2006 #else 2007 __ lwax(Roffset, Rdef_offset_addr, Rindex); 2008 #endif 2009 __ b(Ldispatch); 2010 2011 __ bind(Ldefault_case); 2012 __ profile_switch_default(Rhigh_byte, Rscratch1); 2013 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 2014 2015 __ bind(Ldispatch); 2016 2017 __ add(R14_bcp, Roffset, R14_bcp); 2018 __ dispatch_next(vtos, 0, true); 2019 } 2020 2021 void TemplateTable::lookupswitch() { 2022 transition(itos, itos); 2023 __ stop("lookupswitch bytecode should have been rewritten"); 2024 } 2025 2026 // Table switch using linear search through cases. 2027 // Bytecode stream format: 2028 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 2029 // Note: Everything is big-endian format here. 2030 void TemplateTable::fast_linearswitch() { 2031 transition(itos, vtos); 2032 2033 Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case; 2034 Register Rcount = R3_ARG1, 2035 Rcurrent_pair = R4_ARG2, 2036 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset. 2037 Roffset = R31, // Might need to survive C call. 2038 Rvalue = R12_scratch2, 2039 Rscratch = R11_scratch1, 2040 Rcmp_value = R17_tos; 2041 2042 // Align bcp. 2043 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 2044 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 2045 2046 // Setup loop counter and limit. 2047 __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 2048 __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair. 2049 2050 __ mtctr(Rcount); 2051 __ cmpwi(CCR0, Rcount, 0); 2052 __ bne(CCR0, Lloop_entry); 2053 2054 // Default case 2055 __ bind(Ldefault_case); 2056 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 2057 if (ProfileInterpreter) { 2058 __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */); 2059 } 2060 __ b(Lcontinue_execution); 2061 2062 // Next iteration 2063 __ bind(Lsearch_loop); 2064 __ bdz(Ldefault_case); 2065 __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt); 2066 __ bind(Lloop_entry); 2067 __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned); 2068 __ cmpw(CCR0, Rvalue, Rcmp_value); 2069 __ bne(CCR0, Lsearch_loop); 2070 2071 // Found, load offset. 2072 __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed); 2073 // Calculate case index and profile 2074 __ mfctr(Rcurrent_pair); 2075 if (ProfileInterpreter) { 2076 __ sub(Rcurrent_pair, Rcount, Rcurrent_pair); 2077 __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch); 2078 } 2079 2080 __ bind(Lcontinue_execution); 2081 __ add(R14_bcp, Roffset, R14_bcp); 2082 __ dispatch_next(vtos, 0, true); 2083 } 2084 2085 // Table switch using binary search (value/offset pairs are ordered). 2086 // Bytecode stream format: 2087 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 2088 // Note: Everything is big-endian format here. 
So on little-endian machines, we have to reverse the offset, the count, and the compare value.
void TemplateTable::fast_binaryswitch() {

  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  const Register Rkey     = R17_tos; // already set (tosca)
  const Register Rarray   = R3_ARG1;
  const Register Ri       = R4_ARG2;
  const Register Rj       = R5_ARG3;
  const Register Rh       = R6_ARG4;
  const Register Rscratch = R11_scratch1;

  const int log_entry_size = 3;
  const int entry_size = 1 << log_entry_size;

  Label found;

  // Find array start.
  __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
  __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));

  // Initialize i & j.
  __ li(Ri, 0);
  __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);

  // And start.
  Label entry;
  __ b(entry);

  // binary search loop
  { Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ srdi(Rh, Rh, 1);
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sldi(Rscratch, Rh, log_entry_size);
#if defined(VM_LITTLE_ENDIAN)
    __ lwbrx(Rscratch, Rscratch, Rarray);
#else
    __ lwzx(Rscratch, Rscratch, Rarray);
#endif

    // if (key < current value)
    //   Rh = Rj
    // else
    //   Rh = Ri
    Label Lgreater;
    __ cmpw(CCR0, Rkey, Rscratch);
    __ bge(CCR0, Lgreater);
    __ mr(Rj, Rh);
    __ b(entry);
    __ bind(Lgreater);
    __ mr(Ri, Rh);

    // while (i+1 < j)
    __ bind(entry);
    __ addi(Rscratch, Ri, 1);
    __ cmpw(CCR0, Rscratch, Rj);
    __ add(Rh, Ri, Rj); // start h = i + j >> 1;

    __ blt(CCR0, loop);
  }

  // End of binary search, result index is i (must check again!).
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mr(Rh, Ri); // Save index in i for profiling.
2185 } 2186 // Ri = value offset 2187 __ sldi(Ri, Ri, log_entry_size); 2188 __ add(Ri, Ri, Rarray); 2189 __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned); 2190 2191 Label not_found; 2192 // Ri = offset offset 2193 __ cmpw(CCR0, Rkey, Rscratch); 2194 __ beq(CCR0, not_found); 2195 // entry not found -> j = default offset 2196 __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned); 2197 __ b(default_case); 2198 2199 __ bind(not_found); 2200 // entry found -> j = offset 2201 __ profile_switch_case(Rh, Rj, Rscratch, Rkey); 2202 __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned); 2203 2204 if (ProfileInterpreter) { 2205 __ b(continue_execution); 2206 } 2207 2208 __ bind(default_case); // fall through (if not profiling) 2209 __ profile_switch_default(Ri, Rscratch); 2210 2211 __ bind(continue_execution); 2212 2213 __ extsw(Rj, Rj); 2214 __ add(R14_bcp, Rj, R14_bcp); 2215 __ dispatch_next(vtos, 0 , true); 2216 } 2217 2218 void TemplateTable::_return(TosState state) { 2219 transition(state, state); 2220 assert(_desc->calls_vm(), 2221 "inconsistent calls_vm information"); // call in remove_activation 2222 2223 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { 2224 2225 Register Rscratch = R11_scratch1, 2226 Rklass = R12_scratch2, 2227 Rklass_flags = Rklass; 2228 Label Lskip_register_finalizer; 2229 2230 // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case. 2231 assert(state == vtos, "only valid state"); 2232 __ ld(R17_tos, 0, R18_locals); 2233 2234 // Load klass of this obj. 2235 __ load_klass(Rklass, R17_tos); 2236 __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass); 2237 __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER)); 2238 __ bfalse(CCR0, Lskip_register_finalizer); 2239 2240 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */); 2241 2242 __ align(32, 12); 2243 __ bind(Lskip_register_finalizer); 2244 } 2245 2246 if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) { 2247 Label no_safepoint; 2248 __ ld(R11_scratch1, in_bytes(Thread::polling_page_offset()), R16_thread); 2249 __ andi_(R11_scratch1, R11_scratch1, SafepointMechanism::poll_bit()); 2250 __ beq(CCR0, no_safepoint); 2251 __ push(state); 2252 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)); 2253 __ pop(state); 2254 __ bind(no_safepoint); 2255 } 2256 2257 // Move the result value into the correct register and remove memory stack frame. 2258 __ remove_activation(state, /* throw_monitor_exception */ true); 2259 // Restoration of lr done by remove_activation. 2260 switch (state) { 2261 // Narrow result if state is itos but result type is smaller. 2262 // Need to narrow in the return bytecode rather than in generate_return_entry 2263 // since compiled code callers expect the result to already be narrowed. 2264 case itos: __ narrow(R17_tos); /* fall through */ 2265 case ltos: 2266 case atos: __ mr(R3_RET, R17_tos); break; 2267 case ftos: 2268 case dtos: __ fmr(F1_RET, F15_ftos); break; 2269 case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need 2270 // to get visible before the reference to the object gets stored anywhere. 
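               // A StoreStore barrier suffices here: it only needs to order the
               // constructor's field initialization stores against the later
               // store that publishes the object reference (JSR-133 final field
               // semantics); no ordering against subsequent loads is required
               // at this point.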
               __ membar(Assembler::StoreStore); break;
    default  : ShouldNotReachHere();
  }
  __ blr();
}

// ============================================================================
// Constant pool cache access
//
// Memory ordering:
//
// As done in the C++ interpreter, we load the fields
//   - _indices
//   - _f12_oop
// with acquire semantics, because they are queried to decide whether the cache
// is already resolved. We don't want loads to float above this check.
// See also comments in ConstantPoolCacheEntry::bytecode_1(),
// ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();

// Call into the VM if the call site is not yet resolved.
//
// Input regs:
//   - None, all passed regs are outputs.
//
// Returns:
//   - Rcache:  The const pool cache entry that contains the resolved result.
//   - Rresult: Either noreg or output for f1/f2.
//
// Kills:
//   - Rscratch
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {

  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  Label Lresolved, Ldone;

  Bytecodes::Code code = bytecode();
  switch (code) {
  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  default:
    break;
  }

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  // We are resolved if the indices offset contains the current bytecode.
#if defined(VM_LITTLE_ENDIAN)
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
#else
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
#endif
  // Acquire by cmp-br-isync (see below).
  __ cmpdi(CCR0, Rscratch, (int)code);
  __ beq(CCR0, Lresolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ li(R4_ARG2, code);
  __ call_VM(noreg, entry, R4_ARG2, true);

  // Update registers with resolved info.
  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  __ b(Ldone);

  __ bind(Lresolved);
  __ isync(); // Order load wrt. succeeding loads.
  __ bind(Ldone);
}

// Load the constant pool cache entry at field accesses into registers.
// The Rcache and Rindex registers must be set before call.
// Input:
//   - Rcache, Rindex
// Output:
//   - Robj, Roffset, Rflags
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register Rindex /* unused on PPC64 */,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static = false) {
  assert_different_registers(Rcache, Rflags, Roffset);
  // assert(Rindex == noreg, "parameter not used on PPC64");

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
  if (is_static) {
    __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
    __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
    __ resolve_oop_handle(Robj);
    // Acquire not needed here. The following access has an address dependency on this value.
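    // (Power orders two loads when the second load's address depends on the
    // first load's result, so the dependent mirror/oop-handle loads above
    // need no explicit barrier.)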
  }
}

// Load the constant pool cache entry at invokes into registers.
// Resolve if necessary.

// Input Registers:
//   - None, bcp is used, though
//
// Return registers:
//   - Rmethod       (f1 field or f2 if invokevirtual)
//   - Ritable_index (f2 field)
//   - Rflags        (flags field)
//
// Kills:
//   - R21
//
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register Rmethod,
                                               Register Ritable_index,
                                               Register Rflags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  // Determine constant pool cache field offsets.
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
  // Access constant pool cache fields.
  const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());

  Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.

  if (is_invokevfinal) {
    assert(Ritable_index == noreg, "register not used");
    // Already resolved.
    __ get_cache_and_index_at_bcp(Rcache, 1);
  } else {
    resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
  }

  __ ld(Rmethod, method_offset, Rcache);
  __ ld(Rflags, flags_offset, Rcache);

  if (Ritable_index != noreg) {
    __ ld(Ritable_index, index_offset, Rcache);
  }
}

// ============================================================================
// Field access

// Volatile variables demand their effects be made known to all CPUs
// in order. Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt. each other. ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.
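//
// As a rough sketch (assuming the usual PPC64 mapping of release() to lwsync
// and fence() to sync, and support_IRIW_for_not_multiple_copy_atomic_cpu being
// set for PPC64), the entry points generated below implement:
//
//   volatile load:   fence(); load; twi/isync          (requirements 2 and 1)
//   volatile store:  release(); store; ...; fence()    (requirements 3 and 1)
//
// The trailing fence after volatile stores is what covers the
// volatile-store-volatile-load case mentioned above.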

// The registers cache and index are expected to be set before the call.
// Correct values of the cache and index registers are preserved.
// Kills:
//   Rcache (if has_tos)
//   Rscratch
void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {

  assert_different_registers(Rcache, Rscratch);

  if (JvmtiExport::can_post_field_access()) {
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    Label Lno_field_access_post;

    // Check if post field access is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_access_post);

    // Post access enabled - do it!
    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      __ li(R17_tos, 0);
    } else {
      if (has_tos) {
        // The fast bytecode versions have obj ptr in register.
        // Thus, save the object pointer before call_VM() clobbers it:
        // put the object on tos where GC wants it.
        __ push_ptr(R17_tos);
      } else {
        // Load top of stack (do not pop the value off the stack).
        __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
      }
      __ verify_oop(R17_tos);
    }
    // tos:   object pointer or NULL if static
    // cache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
    if (!is_static && has_tos) {
      // Restore object pointer.
      __ pop_ptr(R17_tos);
      __ verify_oop(R17_tos);
    } else {
      // Cache is still needed to get class or obj.
      __ get_cache_and_index_at_bcp(Rcache, 1);
    }

    __ align(32, 12);
    __ bind(Lno_field_access_post);
  }
}

// kills R11_scratch1
void TemplateTable::pop_and_check_object(Register Roop) {
  Register Rtmp = R11_scratch1;

  assert_different_registers(Rtmp, Roop);
  __ pop_ptr(Roop);
  // For field access must check obj.
  __ null_check_throw(Roop, -1, Rtmp);
  __ verify_oop(Roop);
}

// PPC64: implement volatile loads as fence-load-acquire.
void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  Label Lacquire, Lisync;

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R22_tmp2,
                 Roffset       = R23_tmp3,
                 Rflags        = R31,
                 Rbtable       = R5_ARG3,
                 Rbc           = R6_ARG4,
                 Rscratch      = R12_scratch2;

  static address field_branch_table[number_of_states],
                 static_branch_table[number_of_states];

  address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table;

  // Get field offset.
  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, is_static, false);

  // Load after possible GC.
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  // Note: sync is needed before volatile load on PPC64.

  // Check field type.
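  // The tos state (the field's type) is encoded in the upper bits of the flags
  // word; the rldicl below extracts it, and it is then used to index
  // branch_table. Each table entry records the non-volatile entry point of the
  // per-type load code; on CPUs that need IRIW handling the volatile variant,
  // which starts one instruction earlier at the fence, is selected by
  // subtracting the scaled volatile bit from the table entry. Otherwise
  // volatility is handled via CCR6 and the acquire sequence at Lacquire/Lisync.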
2541 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2542 2543 #ifdef ASSERT 2544 Label LFlagInvalid; 2545 __ cmpldi(CCR0, Rflags, number_of_states); 2546 __ bge(CCR0, LFlagInvalid); 2547 #endif 2548 2549 // Load from branch table and dispatch (volatile case: one instruction ahead). 2550 __ sldi(Rflags, Rflags, LogBytesPerWord); 2551 __ cmpwi(CCR6, Rscratch, 1); // Volatile? 2552 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2553 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0. 2554 } 2555 __ ldx(Rbtable, Rbtable, Rflags); 2556 2557 // Get the obj from stack. 2558 if (!is_static) { 2559 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2560 } else { 2561 __ verify_oop(Rclass_or_obj); 2562 } 2563 2564 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2565 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2566 } 2567 __ mtctr(Rbtable); 2568 __ bctr(); 2569 2570 #ifdef ASSERT 2571 __ bind(LFlagInvalid); 2572 __ stop("got invalid flag", 0x654); 2573 #endif 2574 2575 if (!is_static && rc == may_not_rewrite) { 2576 // We reuse the code from is_static. It's jumped to via the table above. 2577 return; 2578 } 2579 2580 #ifdef ASSERT 2581 // __ bind(Lvtos); 2582 address pc_before_fence = __ pc(); 2583 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2584 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2585 assert(branch_table[vtos] == 0, "can't compute twice"); 2586 branch_table[vtos] = __ pc(); // non-volatile_entry point 2587 __ stop("vtos unexpected", 0x655); 2588 #endif 2589 2590 __ align(32, 28, 28); // Align load. 2591 // __ bind(Ldtos); 2592 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2593 assert(branch_table[dtos] == 0, "can't compute twice"); 2594 branch_table[dtos] = __ pc(); // non-volatile_entry point 2595 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 2596 __ push(dtos); 2597 if (!is_static && rc == may_rewrite) { 2598 patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch); 2599 } 2600 { 2601 Label acquire_double; 2602 __ beq(CCR6, acquire_double); // Volatile? 2603 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2604 2605 __ bind(acquire_double); 2606 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2607 __ beq_predict_taken(CCR0, Lisync); 2608 __ b(Lisync); // In case of NAN. 2609 } 2610 2611 __ align(32, 28, 28); // Align load. 2612 // __ bind(Lftos); 2613 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2614 assert(branch_table[ftos] == 0, "can't compute twice"); 2615 branch_table[ftos] = __ pc(); // non-volatile_entry point 2616 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 2617 __ push(ftos); 2618 if (!is_static && rc == may_rewrite) { 2619 patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); 2620 } 2621 { 2622 Label acquire_float; 2623 __ beq(CCR6, acquire_float); // Volatile? 2624 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2625 2626 __ bind(acquire_float); 2627 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2628 __ beq_predict_taken(CCR0, Lisync); 2629 __ b(Lisync); // In case of NAN. 2630 } 2631 2632 __ align(32, 28, 28); // Align load. 2633 // __ bind(Litos); 2634 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 
2635 assert(branch_table[itos] == 0, "can't compute twice"); 2636 branch_table[itos] = __ pc(); // non-volatile_entry point 2637 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2638 __ push(itos); 2639 if (!is_static && rc == may_rewrite) { 2640 patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch); 2641 } 2642 __ beq(CCR6, Lacquire); // Volatile? 2643 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2644 2645 __ align(32, 28, 28); // Align load. 2646 // __ bind(Lltos); 2647 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2648 assert(branch_table[ltos] == 0, "can't compute twice"); 2649 branch_table[ltos] = __ pc(); // non-volatile_entry point 2650 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2651 __ push(ltos); 2652 if (!is_static && rc == may_rewrite) { 2653 patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch); 2654 } 2655 __ beq(CCR6, Lacquire); // Volatile? 2656 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2657 2658 __ align(32, 28, 28); // Align load. 2659 // __ bind(Lbtos); 2660 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2661 assert(branch_table[btos] == 0, "can't compute twice"); 2662 branch_table[btos] = __ pc(); // non-volatile_entry point 2663 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2664 __ extsb(R17_tos, R17_tos); 2665 __ push(btos); 2666 if (!is_static && rc == may_rewrite) { 2667 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2668 } 2669 __ beq(CCR6, Lacquire); // Volatile? 2670 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2671 2672 __ align(32, 28, 28); // Align load. 2673 // __ bind(Lztos); (same code as btos) 2674 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2675 assert(branch_table[ztos] == 0, "can't compute twice"); 2676 branch_table[ztos] = __ pc(); // non-volatile_entry point 2677 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2678 __ push(ztos); 2679 if (!is_static && rc == may_rewrite) { 2680 // use btos rewriting, no truncating to t/f bit is needed for getfield. 2681 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2682 } 2683 __ beq(CCR6, Lacquire); // Volatile? 2684 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2685 2686 __ align(32, 28, 28); // Align load. 2687 // __ bind(Lctos); 2688 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2689 assert(branch_table[ctos] == 0, "can't compute twice"); 2690 branch_table[ctos] = __ pc(); // non-volatile_entry point 2691 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 2692 __ push(ctos); 2693 if (!is_static && rc == may_rewrite) { 2694 patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch); 2695 } 2696 __ beq(CCR6, Lacquire); // Volatile? 2697 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2698 2699 __ align(32, 28, 28); // Align load. 2700 // __ bind(Lstos); 2701 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2702 assert(branch_table[stos] == 0, "can't compute twice"); 2703 branch_table[stos] = __ pc(); // non-volatile_entry point 2704 __ lhax(R17_tos, Rclass_or_obj, Roffset); 2705 __ push(stos); 2706 if (!is_static && rc == may_rewrite) { 2707 patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch); 2708 } 2709 __ beq(CCR6, Lacquire); // Volatile? 2710 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2711 2712 __ align(32, 28, 28); // Align load. 
  // __ bind(Latos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
  __ verify_oop(R17_tos);
  __ push(atos);
  //__ dcbt(R17_tos); // prefetch
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 12);
  __ bind(Lacquire);
  __ twi_0(R17_tos);
  __ bind(Lisync);
  __ isync(); // acquire

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "get initialization");
    //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

// The registers cache and index are expected to be set before the call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {

  assert_different_registers(Rcache, Rscratch, R6_ARG4);

  if (JvmtiExport::can_post_field_modification()) {
    Label Lno_field_mod_post;

    // Check if post field modification is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_mod_post);

    // Do the post.
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    const Register Robj = Rscratch;

    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ li(Robj, 0);
    } else {
      // In case of the fast versions, value lives in registers => put it back on tos.
      int offs = Interpreter::expr_offset_in_bytes(0);
      Register base = R15_esp;
      switch(bytecode()) {
        case Bytecodes::_fast_aputfield: __ push_ptr(); offs+=  Interpreter::stackElementSize; break;
        case Bytecodes::_fast_iputfield: // Fall through
        case Bytecodes::_fast_bputfield: // Fall through
        case Bytecodes::_fast_zputfield: // Fall through
        case Bytecodes::_fast_cputfield: // Fall through
        case Bytecodes::_fast_sputfield: __ push_i(); offs+=  Interpreter::stackElementSize; break;
        case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break;
        case Bytecodes::_fast_fputfield: __ push_f(); offs+=  Interpreter::stackElementSize; break;
        case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break;
        default: {
          offs = 0;
          base = Robj;
          const Register Rflags = Robj;
          Label is_one_slot;
          // Life is harder. The stack holds the value on top, followed by the
          // object.
We don't know the size of the value, though; it could be 2799 // one or two words depending on its type. As a result, we must find 2800 // the type to determine where the object is. 2801 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian 2802 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2803 2804 __ cmpwi(CCR0, Rflags, ltos); 2805 __ cmpwi(CCR1, Rflags, dtos); 2806 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1)); 2807 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); 2808 __ beq(CCR0, is_one_slot); 2809 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2)); 2810 __ bind(is_one_slot); 2811 break; 2812 } 2813 } 2814 __ ld(Robj, offs, base); 2815 __ verify_oop(Robj); 2816 } 2817 2818 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0)); 2819 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4); 2820 __ get_cache_and_index_at_bcp(Rcache, 1); 2821 2822 // In case of the fast versions, value lives in registers => put it back on tos. 2823 switch(bytecode()) { 2824 case Bytecodes::_fast_aputfield: __ pop_ptr(); break; 2825 case Bytecodes::_fast_iputfield: // Fall through 2826 case Bytecodes::_fast_bputfield: // Fall through 2827 case Bytecodes::_fast_zputfield: // Fall through 2828 case Bytecodes::_fast_cputfield: // Fall through 2829 case Bytecodes::_fast_sputfield: __ pop_i(); break; 2830 case Bytecodes::_fast_lputfield: __ pop_l(); break; 2831 case Bytecodes::_fast_fputfield: __ pop_f(); break; 2832 case Bytecodes::_fast_dputfield: __ pop_d(); break; 2833 default: break; // Nothin' to do. 2834 } 2835 2836 __ align(32, 12); 2837 __ bind(Lno_field_mod_post); 2838 } 2839 } 2840 2841 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release). 2842 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 2843 Label Lvolatile; 2844 2845 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2846 Rclass_or_obj = R31, // Needs to survive C call. 2847 Roffset = R22_tmp2, // Needs to survive C call. 2848 Rflags = R3_ARG1, 2849 Rbtable = R4_ARG2, 2850 Rscratch = R11_scratch1, 2851 Rscratch2 = R12_scratch2, 2852 Rscratch3 = R6_ARG4, 2853 Rbc = Rscratch3; 2854 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2855 2856 static address field_rw_branch_table[number_of_states], 2857 field_norw_branch_table[number_of_states], 2858 static_branch_table[number_of_states]; 2859 2860 address* branch_table = is_static ? static_branch_table : 2861 (rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table); 2862 2863 // Stack (grows up): 2864 // value 2865 // obj 2866 2867 // Load the field offset. 2868 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2869 jvmti_post_field_mod(Rcache, Rscratch, is_static); 2870 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2871 2872 // Load pointer to branch table. 2873 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2874 2875 // Get volatile flag. 2876 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2877 2878 // Check the field type. 
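  // Same dispatch scheme as in getfield_or_static: the tos state extracted
  // below selects the branch_table entry, and for volatile fields the entry
  // one instruction earlier is taken so that a release() is executed before
  // the store. CR_is_vol (CCR2) is a non-volatile condition register, so it
  // survives the store, including the possible runtime call inside
  // do_oop_store, and still indicates whether the trailing fence is needed.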
2879 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2880 2881 #ifdef ASSERT 2882 Label LFlagInvalid; 2883 __ cmpldi(CCR0, Rflags, number_of_states); 2884 __ bge(CCR0, LFlagInvalid); 2885 #endif 2886 2887 // Load from branch table and dispatch (volatile case: one instruction ahead). 2888 __ sldi(Rflags, Rflags, LogBytesPerWord); 2889 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2890 __ cmpwi(CR_is_vol, Rscratch, 1); // Volatile? 2891 } 2892 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0. 2893 __ ldx(Rbtable, Rbtable, Rflags); 2894 2895 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2896 __ mtctr(Rbtable); 2897 __ bctr(); 2898 2899 #ifdef ASSERT 2900 __ bind(LFlagInvalid); 2901 __ stop("got invalid flag", 0x656); 2902 2903 // __ bind(Lvtos); 2904 address pc_before_release = __ pc(); 2905 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2906 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2907 assert(branch_table[vtos] == 0, "can't compute twice"); 2908 branch_table[vtos] = __ pc(); // non-volatile_entry point 2909 __ stop("vtos unexpected", 0x657); 2910 #endif 2911 2912 __ align(32, 28, 28); // Align pop. 2913 // __ bind(Ldtos); 2914 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2915 assert(branch_table[dtos] == 0, "can't compute twice"); 2916 branch_table[dtos] = __ pc(); // non-volatile_entry point 2917 __ pop(dtos); 2918 if (!is_static) { 2919 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2920 } 2921 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2922 if (!is_static && rc == may_rewrite) { 2923 patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); 2924 } 2925 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2926 __ beq(CR_is_vol, Lvolatile); // Volatile? 2927 } 2928 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2929 2930 __ align(32, 28, 28); // Align pop. 2931 // __ bind(Lftos); 2932 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2933 assert(branch_table[ftos] == 0, "can't compute twice"); 2934 branch_table[ftos] = __ pc(); // non-volatile_entry point 2935 __ pop(ftos); 2936 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2937 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2938 if (!is_static && rc == may_rewrite) { 2939 patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); 2940 } 2941 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2942 __ beq(CR_is_vol, Lvolatile); // Volatile? 2943 } 2944 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2945 2946 __ align(32, 28, 28); // Align pop. 2947 // __ bind(Litos); 2948 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2949 assert(branch_table[itos] == 0, "can't compute twice"); 2950 branch_table[itos] = __ pc(); // non-volatile_entry point 2951 __ pop(itos); 2952 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2953 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2954 if (!is_static && rc == may_rewrite) { 2955 patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); 2956 } 2957 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2958 __ beq(CR_is_vol, Lvolatile); // Volatile? 
2959 } 2960 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2961 2962 __ align(32, 28, 28); // Align pop. 2963 // __ bind(Lltos); 2964 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2965 assert(branch_table[ltos] == 0, "can't compute twice"); 2966 branch_table[ltos] = __ pc(); // non-volatile_entry point 2967 __ pop(ltos); 2968 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2969 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2970 if (!is_static && rc == may_rewrite) { 2971 patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); 2972 } 2973 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2974 __ beq(CR_is_vol, Lvolatile); // Volatile? 2975 } 2976 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2977 2978 __ align(32, 28, 28); // Align pop. 2979 // __ bind(Lbtos); 2980 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2981 assert(branch_table[btos] == 0, "can't compute twice"); 2982 branch_table[btos] = __ pc(); // non-volatile_entry point 2983 __ pop(btos); 2984 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2985 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2986 if (!is_static && rc == may_rewrite) { 2987 patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); 2988 } 2989 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2990 __ beq(CR_is_vol, Lvolatile); // Volatile? 2991 } 2992 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2993 2994 __ align(32, 28, 28); // Align pop. 2995 // __ bind(Lztos); 2996 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2997 assert(branch_table[ztos] == 0, "can't compute twice"); 2998 branch_table[ztos] = __ pc(); // non-volatile_entry point 2999 __ pop(ztos); 3000 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 3001 __ andi(R17_tos, R17_tos, 0x1); 3002 __ stbx(R17_tos, Rclass_or_obj, Roffset); 3003 if (!is_static && rc == may_rewrite) { 3004 patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no); 3005 } 3006 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3007 __ beq(CR_is_vol, Lvolatile); // Volatile? 3008 } 3009 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3010 3011 __ align(32, 28, 28); // Align pop. 3012 // __ bind(Lctos); 3013 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 3014 assert(branch_table[ctos] == 0, "can't compute twice"); 3015 branch_table[ctos] = __ pc(); // non-volatile_entry point 3016 __ pop(ctos); 3017 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.. 3018 __ sthx(R17_tos, Rclass_or_obj, Roffset); 3019 if (!is_static && rc == may_rewrite) { 3020 patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); 3021 } 3022 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3023 __ beq(CR_is_vol, Lvolatile); // Volatile? 3024 } 3025 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3026 3027 __ align(32, 28, 28); // Align pop. 3028 // __ bind(Lstos); 3029 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 3030 assert(branch_table[stos] == 0, "can't compute twice"); 3031 branch_table[stos] = __ pc(); // non-volatile_entry point 3032 __ pop(stos); 3033 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 
3034 __ sthx(R17_tos, Rclass_or_obj, Roffset); 3035 if (!is_static && rc == may_rewrite) { 3036 patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); 3037 } 3038 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3039 __ beq(CR_is_vol, Lvolatile); // Volatile? 3040 } 3041 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3042 3043 __ align(32, 28, 28); // Align pop. 3044 // __ bind(Latos); 3045 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 3046 assert(branch_table[atos] == 0, "can't compute twice"); 3047 branch_table[atos] = __ pc(); // non-volatile_entry point 3048 __ pop(atos); 3049 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1 3050 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 3051 if (!is_static && rc == may_rewrite) { 3052 patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); 3053 } 3054 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3055 __ beq(CR_is_vol, Lvolatile); // Volatile? 3056 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3057 3058 __ align(32, 12); 3059 __ bind(Lvolatile); 3060 __ fence(); 3061 } 3062 // fallthru: __ b(Lexit); 3063 3064 #ifdef ASSERT 3065 for (int i = 0; i<number_of_states; ++i) { 3066 assert(branch_table[i], "put initialization"); 3067 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 3068 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 3069 } 3070 #endif 3071 } 3072 3073 void TemplateTable::putfield(int byte_no) { 3074 putfield_or_static(byte_no, false); 3075 } 3076 3077 void TemplateTable::nofast_putfield(int byte_no) { 3078 putfield_or_static(byte_no, false, may_not_rewrite); 3079 } 3080 3081 void TemplateTable::putstatic(int byte_no) { 3082 putfield_or_static(byte_no, true); 3083 } 3084 3085 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job. 3086 void TemplateTable::jvmti_post_fast_field_mod() { 3087 __ should_not_reach_here(); 3088 } 3089 3090 void TemplateTable::fast_storefield(TosState state) { 3091 transition(state, vtos); 3092 3093 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 3094 Rclass_or_obj = R31, // Needs to survive C call. 3095 Roffset = R22_tmp2, // Needs to survive C call. 3096 Rflags = R3_ARG1, 3097 Rscratch = R11_scratch1, 3098 Rscratch2 = R12_scratch2, 3099 Rscratch3 = R4_ARG2; 3100 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 3101 3102 // Constant pool already resolved => Load flags and offset of field. 3103 __ get_cache_and_index_at_bcp(Rcache, 1); 3104 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */); 3105 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3106 3107 // Get the obj and the final store addr. 3108 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 3109 3110 // Get volatile flag. 3111 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3112 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); } 3113 { 3114 Label LnotVolatile; 3115 __ beq(CCR0, LnotVolatile); 3116 __ release(); 3117 __ align(32, 12); 3118 __ bind(LnotVolatile); 3119 } 3120 3121 // Do the store and fencing. 
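  // If the field is volatile, the release() above has already ordered earlier
  // accesses before the store; the fence() after the switch below additionally
  // orders the volatile store against subsequent volatile loads (the
  // store-load case).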
3122 switch(bytecode()) { 3123 case Bytecodes::_fast_aputfield: 3124 // Store into the field. 3125 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 3126 break; 3127 3128 case Bytecodes::_fast_iputfield: 3129 __ stwx(R17_tos, Rclass_or_obj, Roffset); 3130 break; 3131 3132 case Bytecodes::_fast_lputfield: 3133 __ stdx(R17_tos, Rclass_or_obj, Roffset); 3134 break; 3135 3136 case Bytecodes::_fast_zputfield: 3137 __ andi(R17_tos, R17_tos, 0x1); // boolean is true if LSB is 1 3138 // fall through to bputfield 3139 case Bytecodes::_fast_bputfield: 3140 __ stbx(R17_tos, Rclass_or_obj, Roffset); 3141 break; 3142 3143 case Bytecodes::_fast_cputfield: 3144 case Bytecodes::_fast_sputfield: 3145 __ sthx(R17_tos, Rclass_or_obj, Roffset); 3146 break; 3147 3148 case Bytecodes::_fast_fputfield: 3149 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 3150 break; 3151 3152 case Bytecodes::_fast_dputfield: 3153 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 3154 break; 3155 3156 default: ShouldNotReachHere(); 3157 } 3158 3159 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3160 Label LVolatile; 3161 __ beq(CR_is_vol, LVolatile); 3162 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3163 3164 __ align(32, 12); 3165 __ bind(LVolatile); 3166 __ fence(); 3167 } 3168 } 3169 3170 void TemplateTable::fast_accessfield(TosState state) { 3171 transition(atos, state); 3172 3173 Label LisVolatile; 3174 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3175 3176 const Register Rcache = R3_ARG1, 3177 Rclass_or_obj = R17_tos, 3178 Roffset = R22_tmp2, 3179 Rflags = R23_tmp3, 3180 Rscratch = R12_scratch2; 3181 3182 // Constant pool already resolved. Get the field offset. 3183 __ get_cache_and_index_at_bcp(Rcache, 1); 3184 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3185 3186 // JVMTI support 3187 jvmti_post_field_access(Rcache, Rscratch, false, true); 3188 3189 // Get the load address. 3190 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3191 3192 // Get volatile flag. 3193 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 
3194 __ bne(CCR0, LisVolatile); 3195 3196 switch(bytecode()) { 3197 case Bytecodes::_fast_agetfield: 3198 { 3199 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3200 __ verify_oop(R17_tos); 3201 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3202 3203 __ bind(LisVolatile); 3204 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3205 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3206 __ verify_oop(R17_tos); 3207 __ twi_0(R17_tos); 3208 __ isync(); 3209 break; 3210 } 3211 case Bytecodes::_fast_igetfield: 3212 { 3213 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3214 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3215 3216 __ bind(LisVolatile); 3217 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3218 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3219 __ twi_0(R17_tos); 3220 __ isync(); 3221 break; 3222 } 3223 case Bytecodes::_fast_lgetfield: 3224 { 3225 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3226 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3227 3228 __ bind(LisVolatile); 3229 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3230 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3231 __ twi_0(R17_tos); 3232 __ isync(); 3233 break; 3234 } 3235 case Bytecodes::_fast_bgetfield: 3236 { 3237 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3238 __ extsb(R17_tos, R17_tos); 3239 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3240 3241 __ bind(LisVolatile); 3242 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3243 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3244 __ twi_0(R17_tos); 3245 __ extsb(R17_tos, R17_tos); 3246 __ isync(); 3247 break; 3248 } 3249 case Bytecodes::_fast_cgetfield: 3250 { 3251 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3252 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3253 3254 __ bind(LisVolatile); 3255 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3256 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3257 __ twi_0(R17_tos); 3258 __ isync(); 3259 break; 3260 } 3261 case Bytecodes::_fast_sgetfield: 3262 { 3263 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3264 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3265 3266 __ bind(LisVolatile); 3267 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3268 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3269 __ twi_0(R17_tos); 3270 __ isync(); 3271 break; 3272 } 3273 case Bytecodes::_fast_fgetfield: 3274 { 3275 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3276 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3277 3278 __ bind(LisVolatile); 3279 Label Ldummy; 3280 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3281 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3282 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3283 __ bne_predict_not_taken(CCR0, Ldummy); 3284 __ bind(Ldummy); 3285 __ isync(); 3286 break; 3287 } 3288 case Bytecodes::_fast_dgetfield: 3289 { 3290 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3291 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3292 3293 __ bind(LisVolatile); 3294 Label Ldummy; 3295 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3296 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3297 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 
3298 __ bne_predict_not_taken(CCR0, Ldummy); 3299 __ bind(Ldummy); 3300 __ isync(); 3301 break; 3302 } 3303 default: ShouldNotReachHere(); 3304 } 3305 } 3306 3307 void TemplateTable::fast_xaccess(TosState state) { 3308 transition(vtos, state); 3309 3310 Label LisVolatile; 3311 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3312 const Register Rcache = R3_ARG1, 3313 Rclass_or_obj = R17_tos, 3314 Roffset = R22_tmp2, 3315 Rflags = R23_tmp3, 3316 Rscratch = R12_scratch2; 3317 3318 __ ld(Rclass_or_obj, 0, R18_locals); 3319 3320 // Constant pool already resolved. Get the field offset. 3321 __ get_cache_and_index_at_bcp(Rcache, 2); 3322 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3323 3324 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches. 3325 3326 // Needed to report exception at the correct bcp. 3327 __ addi(R14_bcp, R14_bcp, 1); 3328 3329 // Get the load address. 3330 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3331 3332 // Get volatile flag. 3333 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3334 __ bne(CCR0, LisVolatile); 3335 3336 switch(state) { 3337 case atos: 3338 { 3339 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3340 __ verify_oop(R17_tos); 3341 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3342 3343 __ bind(LisVolatile); 3344 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3345 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3346 __ verify_oop(R17_tos); 3347 __ twi_0(R17_tos); 3348 __ isync(); 3349 break; 3350 } 3351 case itos: 3352 { 3353 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3354 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3355 3356 __ bind(LisVolatile); 3357 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3358 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3359 __ twi_0(R17_tos); 3360 __ isync(); 3361 break; 3362 } 3363 case ftos: 3364 { 3365 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3366 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3367 3368 __ bind(LisVolatile); 3369 Label Ldummy; 3370 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3371 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3372 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3373 __ bne_predict_not_taken(CCR0, Ldummy); 3374 __ bind(Ldummy); 3375 __ isync(); 3376 break; 3377 } 3378 default: ShouldNotReachHere(); 3379 } 3380 __ addi(R14_bcp, R14_bcp, -1); 3381 } 3382 3383 // ============================================================================ 3384 // Calls 3385 3386 // Common code for invoke 3387 // 3388 // Input: 3389 // - byte_no 3390 // 3391 // Output: 3392 // - Rmethod: The method to invoke next. 3393 // - Rret_addr: The return address to return to. 3394 // - Rindex: MethodType (invokehandle) or CallSite obj (invokedynamic) 3395 // - Rrecv: Cache for "this" pointer, might be noreg if static call. 3396 // - Rflags: Method flags from const pool cache. 3397 // 3398 // Kills: 3399 // - Rscratch1 3400 // 3401 void TemplateTable::prepare_invoke(int byte_no, 3402 Register Rmethod, // linked method (or i-klass) 3403 Register Rret_addr,// return address 3404 Register Rindex, // itable index, MethodType, etc. 3405 Register Rrecv, // If caller wants to see it. 3406 Register Rflags, // If caller wants to test it. 

// ============================================================================
// Calls

// Common code for invoke
//
// Input:
//   - byte_no
//
// Output:
//   - Rmethod:   The method to invoke next.
//   - Rret_addr: The return address to return to.
//   - Rindex:    MethodType (invokehandle) or CallSite obj (invokedynamic).
//   - Rrecv:     Cache for "this" pointer, might be noreg if static call.
//   - Rflags:    Method flags from const pool cache.
//
// Kills:
//   - Rscratch
//
void TemplateTable::prepare_invoke(int byte_no,
                                   Register Rmethod,  // linked method (or i-klass)
                                   Register Rret_addr,// return address
                                   Register Rindex,   // itable index, MethodType, etc.
                                   Register Rrecv,    // If caller wants to see it.
                                   Register Rflags,   // If caller wants to test it.
                                   Register Rscratch
                                   ) {
  // Determine flags.
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (Rrecv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");

  assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
  assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
  assert_different_registers(Rret_addr, Rscratch);

  load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);

  // Saving of SP done in call_from_interpreter.

  // Maybe push "appendix" to arguments.
  if (is_invokedynamic || is_invokehandle) {
    Label Ldone;
    __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
    __ beq(CCR0, Ldone);
    // Push "appendix" (MethodType, CallSite, etc.).
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ load_resolved_reference_at_index(Rscratch, Rindex);
    __ verify_oop(Rscratch);
    __ push_ptr(Rscratch);
    __ bind(Ldone);
  }

  // Load receiver if needed (after appendix is pushed so parameter size is correct).
  if (load_receiver) {
    const Register Rparam_count = Rscratch;
    __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
    __ load_receiver(Rparam_count, Rrecv);
    __ verify_oop(Rrecv);
  }

  // Get return address.
  {
    Register Rtable_addr = Rscratch;
    Register Rret_type = Rret_addr;
    address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);

    // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
    __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
    __ load_dispatch_table(Rtable_addr, (address*)table_addr);
    __ sldi(Rret_type, Rret_type, LogBytesPerWord);
    // Get return address.
    __ ldx(Rret_addr, Rtable_addr, Rret_type);
  }
}

// Helper for virtual calls. Load target method out of the vtable and jump off!
// Kills all passed registers.
void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {

  assert_different_registers(Rrecv_klass, Rtemp, Rret);
  const Register Rtarget_method = Rindex;

  // Get target method & entry point.
  const int base = in_bytes(Klass::vtable_start_offset());
  // Calculate the vtable entry address: scale the vtable index by the entry size.
  __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size_in_bytes()));
  // Load target.
  __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
  __ ldx(Rtarget_method, Rindex, Rrecv_klass);
  // Argument and return type profiling.
  __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
  __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
}
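
// What generate_vtable_call computes, as a hypothetical C sketch (the real
// offsets come from Klass and vtableEntry):
//
//   Method* target = *(Method**)((address)recv_klass
//                                + in_bytes(Klass::vtable_start_offset())
//                                + vtable_index * vtableEntry::size_in_bytes()
//                                + vtableEntry::method_offset_in_bytes());
//
// The fixed offsets are folded into one addi on the klass pointer; the
// scaled index is then applied for free by the indexed load (ldx).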
// Virtual or final call. Final calls are rewritten on the fly to run through
// "_fast_invokevfinal" next time.
void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);

  Register Rtable_addr = R11_scratch1,
           Rret_type = R12_scratch2,
           Rret_addr = R5_ARG3,
           Rflags = R22_tmp2,            // Should survive C call.
           Rrecv = R3_ARG1,
           Rrecv_klass = Rrecv,
           Rvtableindex_or_method = R31, // Should survive C call.
           Rnum_params = R4_ARG2,
           Rnew_bc = R6_ARG4;

  Label LnotFinal;

  load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) {
    patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
  }
  invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);

  __ align(32, 12);
  __ bind(LnotFinal);
  // Load "this" pointer (receiver).
  __ rldicl(Rnum_params, Rflags, 64, 48);
  __ load_receiver(Rnum_params, Rrecv);
  __ verify_oop(Rrecv);

  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);
  __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
  __ load_klass(Rrecv_klass, Rrecv);
  __ verify_klass_ptr(Rrecv_klass);
  __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);

  generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f2_byte, "use this argument");
  Register Rflags = R22_tmp2,
           Rmethod = R31;
  load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
  invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
}
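
// Return-address selection, used by the invoke flavors above and below: the
// tos_state field of the flags word indexes a per-bytecode return entry
// table. As a hypothetical sketch:
//
//   address ret = Interpreter::invoke_return_entry_table_for(code)[tos_state];
//
// emitted as rldicl (extract tos_state), sldi (scale by wordSize) and ldx.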

void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {

  assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);

  // Load receiver from stack slot.
  Register Rrecv = Rscratch2;
  Register Rnum_params = Rrecv;

  __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
  __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);

  // Get return address.
  Register Rtable_addr = Rscratch1,
           Rret_addr = Rflags,
           Rret_type = Rret_addr;
  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);

  // Load receiver and receiver NULL check.
  __ load_receiver(Rnum_params, Rrecv);
  __ null_check_throw(Rrecv, -1, Rscratch1);

  __ profile_final_call(Rrecv, Rscratch1);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);

  // Do the call.
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
}

void TemplateTable::invokespecial(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3,
           Rreceiver = R6_ARG4,
           Rmethod = R31;

  prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);

  // Receiver NULL check.
  __ null_check_throw(Rreceiver, -1, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokestatic(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3;

  prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
                                                  Register Rret,
                                                  Register Rflags,
                                                  Register Rmethod,
                                                  Register Rtemp1,
                                                  Register Rtemp2) {

  assert_different_registers(Rmethod, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
  Label LnotFinal;

  // Check for vfinal.
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  Register Rscratch = Rflags; // Rflags is dead now.

  // Final call case.
  __ profile_final_call(Rtemp1, Rscratch);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch, Rrecv_klass /* scratch */, true);
  // Do the final call - the index (f2) contains the method.
  __ call_from_interpreter(Rmethod, Rret, Rscratch, Rrecv_klass /* scratch */);

  // Non-final call case.
  __ bind(LnotFinal);
  __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
  generate_vtable_call(Rrecv_klass, Rmethod, Rret, Rscratch);
}

void TemplateTable::invokeinterface(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  const Register Rscratch1        = R11_scratch1,
                 Rscratch2        = R12_scratch2,
                 Rmethod          = R6_ARG4,
                 Rmethod2         = R9_ARG7,
                 Rinterface_klass = R5_ARG3,
                 Rret_addr        = R8_ARG6,
                 Rindex           = R10_ARG8,
                 Rreceiver        = R3_ARG1,
                 Rrecv_klass      = R4_ARG2,
                 Rflags           = R7_ARG5;

  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rmethod, Rreceiver, Rflags, Rscratch1);

  // Get receiver klass.
  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch2);
  __ load_klass(Rrecv_klass, Rreceiver);

  // Check corner case: virtual method of java.lang.Object invoked via invokeinterface.
  Label LobjectMethod, L_no_such_interface, Lthrow_ame;
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  __ btrue(CCR0, LobjectMethod);

  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2,
                             L_no_such_interface, /*return_method=*/false);

  __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);

  // Find entry point to call.

  // Get declaring interface class from method.
  __ ld(Rinterface_klass, in_bytes(Method::const_offset()), Rmethod);
  __ ld(Rinterface_klass, in_bytes(ConstMethod::constants_offset()), Rinterface_klass);
  __ ld(Rinterface_klass, ConstantPool::pool_holder_offset_in_bytes(), Rinterface_klass);

  // Get itable index from method. The stored value is a biased encoding;
  // recover the real index as itable_index_max - stored value.
  __ lwa(Rindex, in_bytes(Method::itable_index_offset()), Rmethod);
  __ subfic(Rindex, Rindex, Method::itable_index_max);

  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rmethod2, Rscratch1, Rscratch2,
                             L_no_such_interface);

  __ cmpdi(CCR0, Rmethod2, 0);
  __ beq(CCR0, Lthrow_ame);
  // Found entry. Jump off!
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true);
  //__ profile_called_method(Rindex, Rscratch1);
  __ call_from_interpreter(Rmethod2, Rret_addr, Rscratch1, Rscratch2);

  // Itable entry was NULL => Throw AbstractMethodError.
  __ bind(Lthrow_ame);
  // Pass arguments for generating a verbose error message.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
          Rrecv_klass, Rmethod);

  // Interface was not found => Throw IncompatibleClassChangeError.
  __ bind(L_no_such_interface);
  // Pass arguments for generating a verbose error message.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
          Rrecv_klass, Rinterface_klass);
  DEBUG_ONLY( __ should_not_reach_here(); )

  // Special case of invokeinterface called for a virtual method of
  // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
  // The invokeinterface was rewritten to an invokevirtual, hence we have
  // to handle this corner case. This code isn't produced by javac, but could
  // be produced by another compliant java compiler.
  __ bind(LobjectMethod);
  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rmethod, Rscratch1, Rscratch2);
}
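
// Interface dispatch above boils down to two itable scans (both emitted by
// lookup_interface_method); as a rough sketch:
//
//   1) Scan the receiver klass' itable for the *resolved* interface with
//      return_method=false: this only proves the class implements it,
//      otherwise => IncompatibleClassChangeError.
//   2) Scan again for the interface *declaring* the resolved method and
//      load the Method* at the decoded itable index.
//
// A NULL Method* at that index means the method is abstract in this klass
// => AbstractMethodError.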

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags    = R4_ARG2,
                 Rmethod   = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);

  // Profile this call.
  __ profile_call(Rscratch1, Rscratch2);

  // Off we go. With the new method handles, we don't jump to a method handle
  // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
  // to be the callsite object the bootstrap method returned. This is passed to a
  // "link" method which does the dispatch (most likely just grabs the MH stored
  // inside the callsite and does an invokehandle).
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags    = R4_ARG2,
                 Rrecv     = R5_ARG3,
                 Rmethod   = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
  __ verify_method_ptr(Rmethod);
  __ null_check_throw(Rrecv, -1, Rscratch2);

  __ profile_final_call(Rrecv, Rscratch1);

  // Still no call from handle => We call the method handle interpreter here.
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}
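
// The "appendix" handling shared by invokedynamic and invokehandle lives in
// prepare_invoke above; as a hypothetical sketch of what it emits:
//
//   if (flags & (1 << has_appendix_shift)) {
//     push_ptr(resolved_references[index]);  // MethodType or CallSite
//   }
//
// The appendix counts as an invisible trailing argument, which is why it must
// be pushed before the receiver is located via parameter_size.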

// =============================================================================
// Allocation

// Puts allocated obj ref onto the expression stack.
void TemplateTable::_new() {
  transition(vtos, atos);

  Label Lslow_case,
        Ldone;

  const Register RallocatedObject = R17_tos,
                 RinstanceKlass   = R9_ARG7,
                 Rscratch         = R11_scratch1,
                 Roffset          = R8_ARG6,
                 Rinstance_size   = Roffset,
                 Rcpool           = R4_ARG2,
                 Rtags            = R3_ARG1,
                 Rindex           = R5_ARG3;

  // --------------------------------------------------------------------------
  // Check if fast case is possible.

  // Load pointers to const pool and const pool's tags array.
  __ get_cpool_and_tags(Rcpool, Rtags);
  // Load index of constant pool entry.
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  // Note: compared to other architectures, PPC's implementation always goes
  // to the slow path if TLAB is used and fails.
  if (UseTLAB) {
    // Make sure the class we're about to instantiate has been resolved.
    // This is done before loading InstanceKlass to be consistent with the order
    // how the Constant Pool is updated (see ConstantPoolCache::klass_at_put).
    __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
    __ lbzx(Rtags, Rindex, Rtags);

    __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
    __ bne(CCR0, Lslow_case);

    // Get InstanceKlass.
    __ sldi(Roffset, Rindex, LogBytesPerWord);
    __ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass);

    // Make sure klass is fully initialized and get instance_size.
    __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
    __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);

    __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
    // Make sure klass does not have has_finalizer, or is abstract, or interface or java/lang/Class.
    __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?

    __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized?
    __ beq(CCR0, Lslow_case);

    // --------------------------------------------------------------------------
    // Fast case:
    // Allocate the instance.
    // 1) Try to allocate in the TLAB.
    // 2) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).

    Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
    Register RnewTopValue = R6_ARG4;
    Register RendValue    = R7_ARG5;

    // Check if we can allocate in the TLAB.
    __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
    __ ld(RendValue, in_bytes(JavaThread::tlab_current_end_offset()), R16_thread);

    __ add(RnewTopValue, Rinstance_size, RoldTopValue);

    // If there is enough space, we do not CAS and do not clear.
    __ cmpld(CCR0, RnewTopValue, RendValue);
    __ bgt(CCR0, Lslow_case);

    __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);

    if (!ZeroTLAB) {
      // --------------------------------------------------------------------------
      // Init1: Zero out newly allocated memory.
      // Initialize remaining object fields.
      Register Rbase = Rtags;
      __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
      __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
      __ srdi(Rinstance_size, Rinstance_size, 3);

      // Clear out object skipping header. Also takes care of the zero length case.
      __ clear_memory_doubleword(Rbase, Rinstance_size);
    }

    // --------------------------------------------------------------------------
    // Init2: Initialize the header: mark, klass.
    // Init mark.
    if (UseBiasedLocking) {
      __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
    } else {
      __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
    }
    __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);

    // Init klass.
    __ store_klass_gap(RallocatedObject);
    __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)

    // Check and trigger dtrace event.
    SkipIfEqualZero::skip_to_label_if_equal_zero(_masm, Rscratch, &DTraceAllocProbes, Ldone);
    __ push(atos);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
    __ pop(atos);

    __ b(Ldone);
  }

  // --------------------------------------------------------------------------
  // Slow case.
  __ bind(Lslow_case);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);

  // Continue.
  __ bind(Ldone);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}
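
// The TLAB fast path in _new() above is a plain bump-the-pointer allocation;
// no CAS is needed because a TLAB is thread-private. Hypothetical C sketch:
//
//   HeapWord* old_top = thread->tlab_top();
//   HeapWord* new_top = old_top + instance_size;
//   if (new_top > thread->tlab_end()) goto slow_case; // refill/GC via runtime
//   thread->set_tlab_top(new_top);
//   // old_top is the new, still uninitialized object
//
// The trailing StoreStore membar keeps the header/field initialization from
// being reordered with a later store that publishes the reference.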

void TemplateTable::newarray() {
  transition(itos, atos);

  __ lbz(R4, 1, R14_bcp);
  __ extsw(R5, R17_tos);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4 /* type */, R5 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  __ get_constant_pool(R4);
  __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
  __ extsw(R6, R17_tos); // size
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

// Allocate a multi dimensional array.
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register Rptr = R31; // Needs to survive C call.

  // Compute ndims * wordSize; the number of dimensions is the byte at bcp + 3.
  __ lbz(Rptr, 3, R14_bcp);
  __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
  // Esp points past the last dimension, so set R4 to the address of the first dimension.
  __ add(R4, Rptr, R15_esp);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
  // Pop all dimensions off the stack.
  __ add(R15_esp, Rptr, R15_esp);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);

  __ verify_oop(R17_tos);
  __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
  __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
}

// ============================================================================
// Typechecks

void TemplateTable::checkcast() {
  transition(atos, atos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset         = R6_ARG4,
           RobjKlass       = R4_ARG2,
           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
           Rcpool          = R11_scratch1,
           Rtags           = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" instanceof.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);

  // Do the checkcast.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check; branches to Ldone if the cast succeeds,
  // falls through on failure.
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);

  // Not a subtype, so throw an exception.
  // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}
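
// checkcast above and instanceof below share the same resolve/quicken logic
// and the same fast subtype check; they only differ in how failure is
// reported. As a rough sketch:
//
//   checkcast:  if (obj != NULL && !obj->klass()->is_subtype_of(K)) throw CCE;
//   instanceof: tos = (obj != NULL && obj->klass()->is_subtype_of(K)) ? 1 : 0;
//
// Null is handled separately in both cases, since it passes checkcast but
// fails instanceof.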

// Output:
// - tos == 0: Obj was null or not an instance of class.
// - tos == 1: Obj was an instance of class.
void TemplateTable::instanceof() {
  transition(atos, itos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset         = R6_ARG4,
           RobjKlass       = R4_ARG2,
           RspecifiedKlass = R5_ARG3,
           Rcpool          = R11_scratch1,
           Rtags           = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" instanceof.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);

  // Do the type check.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone with tos == 1 if the
  // check succeeds; falls through and leaves tos == 0 on failure.
  __ li(R17_tos, 1);
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
  __ li(R17_tos, 0);

  if (ProfileInterpreter) {
    __ b(Ldone);
  }

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}

// =============================================================================
// Breakpoints

void TemplateTable::_breakpoint() {
  transition(vtos, vtos);

  // Get the unpatched byte code.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
  __ mr(R31, R3_RET);

  // Post the breakpoint event.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);

  // Complete the execution of original bytecode.
  __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
}

// =============================================================================
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // Exception oop is in tos.
  __ verify_oop(R17_tos);

  __ null_check_throw(R17_tos, -1, R11_scratch1);

  // The throw-exception interpreter entry expects the exception oop to be in R3.
  __ mr(R3_RET, R17_tos);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
  __ mtctr(R11_scratch1);
  __ bctr();
}
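
// Monitor handling below assumes the PPC64 interpreter frame layout, where
// the monitor block grows towards lower addresses (rough sketch):
//
//   previous (caller) SP
//     -> ijava_state
//     -> BasicObjectLock  [monitor base, oldest]
//        ...
//     -> BasicObjectLock  [R26_monitor, topmost/newest]
//     -> expression stack
//
// Both searches therefore start at R26_monitor and walk towards the base by
// adding interpreter_frame_monitor_size() * wordSize per slot.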

// =============================================================================
// Synchronization

// Searches the basic object lock list on the stack for a free slot
// and uses it to lock the object in tos.
//
// Recursive locking is enabled by exiting the search if the same
// object is already found in the list. Thus, a new BasicObjectLock
// is allocated "higher up" in the stack and is therefore found first
// at the next monitor exit.
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  __ verify_oop(R17_tos);

  Register Rcurrent_monitor  = R11_scratch1,
           Rcurrent_obj      = R12_scratch2,
           Robj_to_lock      = R17_tos,
           Rscratch1         = R3_ARG1,
           Rscratch2         = R4_ARG2,
           Rscratch3         = R5_ARG3,
           Rcurrent_obj_addr = R6_ARG4;

  // ------------------------------------------------------------------------------
  // Null pointer exception.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until monitorenter returns true).

  // ------------------------------------------------------------------------------
  // Find a free slot in the monitor block.
  Label Lfound, Lexit, Lallocate_new;
  ConditionRegister found_free_slot = CCR0,
                    found_same_obj  = CCR1,
                    reached_limit   = CCR6;
  {
    Label Lloop;
    Register Rlimit = Rcurrent_monitor;

    // Set up search loop - start with topmost monitor.
    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());

    __ ld(Rlimit, 0, R1_SP);
    __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base

    // Check if any slot is present => short cut to allocation if not.
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ bgt(reached_limit, Lallocate_new);

    // Pre-load topmost slot.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // The search loop.
    __ bind(Lloop);
    // Found free slot?
    __ cmpdi(found_free_slot, Rcurrent_obj, 0);
    // Is this entry for same obj? If so, stop the search and take the found
    // free slot or allocate a new one to enable recursive locking.
    __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ beq(found_free_slot, Lexit);
    __ beq(found_same_obj, Lallocate_new);
    __ bgt(reached_limit, Lallocate_new);
    // Check if last allocated BasicObjectLock reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ b(Lloop);
  }

  // ------------------------------------------------------------------------------
  // Check if we found a free slot.
  __ bind(Lexit);

  __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
  __ b(Lfound);

  // We didn't find a free BasicObjectLock => allocate one.
  __ align(32, 12);
  __ bind(Lallocate_new);
  __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
  __ mr(Rcurrent_monitor, R26_monitor);
  __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());

  // ------------------------------------------------------------------------------
  // We now have a slot to lock.
  __ bind(Lfound);

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ addi(R14_bcp, R14_bcp, 1);

  __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
  __ lock_object(Rcurrent_monitor, Robj_to_lock);

  // Check if there's enough space on the stack for the monitors after locking.
  // This emits a single store.
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}
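
// Note for monitorenter above and monitorexit below: each stack slot holds a
// BasicObjectLock, essentially { BasicLock lock; oop obj; }. A NULL obj marks
// a free slot, and the same obj appearing in a newer slot marks a recursive
// lock, as described above.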

void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(R17_tos);

  Register Rcurrent_monitor  = R11_scratch1,
           Rcurrent_obj      = R12_scratch2,
           Robj_to_lock      = R17_tos,
           Rcurrent_obj_addr = R3_ARG1,
           Rlimit            = R4_ARG2;
  Label Lfound, Lillegal_monitor_state;

  // Check corner case: unbalanced monitorEnter / Exit.
  __ ld(Rlimit, 0, R1_SP);
  __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base

  // Null pointer check.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  __ cmpld(CCR0, R26_monitor, Rlimit);
  __ bgt(CCR0, Lillegal_monitor_state);

  // Find the corresponding slot in the monitors stack section.
  {
    Label Lloop;

    // Start with topmost monitor.
    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
    __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    __ bind(Lloop);
    // Is this entry for same obj?
    __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
    __ beq(CCR0, Lfound);

    // Check if last allocated BasicObjectLock reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ ble(CCR0, Lloop);
  }

  // Fell through without finding the basic object lock => throw up!
  __ bind(Lillegal_monitor_state);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  __ align(32, 12);
  __ bind(Lfound);
  __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
          -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ unlock_object(Rcurrent_monitor);
}

// ============================================================================
// Wide bytecodes

// Wide instructions. Simply redirects to the wide entry point for that instruction.
void TemplateTable::wide() {
  transition(vtos, vtos);

  const Register Rtable = R11_scratch1,
                 Rindex = R12_scratch2,
                 Rtmp   = R0;

  __ lbz(Rindex, 1, R14_bcp);

  __ load_dispatch_table(Rtable, Interpreter::_wentry_point);

  __ slwi(Rindex, Rindex, LogBytesPerWord);
  __ ldx(Rtmp, Rtable, Rindex);
  __ mtctr(Rtmp);
  __ bctr();
  // Note: the bcp increment step is part of the individual wide bytecode implementations.
}
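
// wide() above amounts to an indirect dispatch through the wide entry table,
// roughly (hypothetical sketch):
//
//   goto Interpreter::_wentry_point[*(bcp + 1)];
//
// so each wide-capable bytecode implementation performs its own bcp advance.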