/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants is used at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,         // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1BarrierSet:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
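    // The card-table case below needs only a post-barrier: after the store,
    // the card covering the updated location is dirtied so that a later
    // remembered-set scan revisits it. (The G1 case above additionally runs
    // the SATB pre-barrier, which records the value being overwritten.)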
    case BarrierSet::CardTableModRef:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should remain uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

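// Note: PPC has no floating-point immediate moves, so fconst/dconst above
// fetch their constants from static storage. load_const_optimized with
// /*return simm16 rest*/ true materializes the high part of the constant's
// address and hands back the low 16 bits, which are then folded into the
// displacement of the following lfs/lfd.
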
void TemplateTable::bipush() {
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notFloat, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ bne(CCR0, notFloat);
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);
  __ b(exit);

  __ align(32, 12);
  // assume the tag is for condy; if not, the VM runtime will tell us
  __ bind(notFloat);
  condy_helper(exit);

  __ align(32, 12);
  __ bind(exit);
}

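// A resolved-references entry whose constant resolved to null holds
// Universe::the_null_sentinel() instead of NULL (a NULL entry means "not yet
// resolved"). fast_aldc below therefore compares the loaded oop against the
// sentinel and converts a hit back to NULL before publishing the tos value.
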
// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label is_null;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size);  // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null);

  // Convert null sentinel to NULL.
  int simm16_rest = __ load_const_optimized(Rscratch, Universe::the_null_sentinel_addr(), R0, true);
  __ ld(Rscratch, simm16_rest, Rscratch);
  __ cmpld(CCR0, R17_tos, Rscratch);
  if (VM_Version::has_isel()) {
    __ isel_0(R17_tos, CCR0, Assembler::equal);
  } else {
    Label not_sentinel;
    __ bne(CCR0, not_sentinel);
    __ li(R17_tos, 0);
    __ bind(not_sentinel);
  }
  __ verify_oop(R17_tos);
  __ dispatch_epilog(atos, Bytecodes::length_for(bytecode()));

  __ bind(is_null);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
  __ call_VM(R17_tos, entry, R3_ARG1);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label not_double, not_long, exit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);
  __ sldi(Rindex, Rindex, LogBytesPerWord);

  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, not_double);
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(exit);

  __ bind(not_double);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Long);
  __ bne(CCR0, not_long);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);
  __ b(exit);

  __ bind(not_long);
  condy_helper(exit);

  __ align(32, 12);
  __ bind(exit);
}

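// For a dynamically-computed constant (condy), resolution is delegated to
// the VM: InterpreterRuntime::resolve_ldc returns the constant (for a
// primitive, the base oop under which the value is stored) and leaves a
// flags word in vm_result_2, encoded like ConstantPoolCacheEntry::_flags
// (tos state in the upper bits, payload offset in the lower bits), which
// condy_helper decodes below.
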
void TemplateTable::condy_helper(Label& Done) {
  const Register obj   = R31;
  const Register off   = R11_scratch1;
  const Register flags = R12_scratch2;
  const Register rarg  = R4_ARG2;
  __ li(rarg, (int)bytecode());
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
  __ get_vm_result_2(flags);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ andi(off, flags, ConstantPoolCacheEntry::field_index_mask);

  // What sort of thing are we loading?
  __ rldicl(flags, flags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

  switch (bytecode()) {
    case Bytecodes::_ldc:
    case Bytecodes::_ldc_w:
      {
        // tos in (itos, ftos, stos, btos, ctos, ztos)
        Label notInt, notFloat, notShort, notByte, notChar, notBool;
        __ cmplwi(CCR0, flags, itos);
        __ bne(CCR0, notInt);
        // itos
        __ lwax(R17_tos, obj, off);
        __ push(itos);
        __ b(Done);

        __ bind(notInt);
        __ cmplwi(CCR0, flags, ftos);
        __ bne(CCR0, notFloat);
        // ftos
        __ lfsx(F15_ftos, obj, off);
        __ push(ftos);
        __ b(Done);

        __ bind(notFloat);
        __ cmplwi(CCR0, flags, stos);
        __ bne(CCR0, notShort);
        // stos
        __ lhax(R17_tos, obj, off);
        __ push(stos);
        __ b(Done);

        __ bind(notShort);
        __ cmplwi(CCR0, flags, btos);
        __ bne(CCR0, notByte);
        // btos
        __ lbzx(R17_tos, obj, off);
        __ extsb(R17_tos, R17_tos);
        __ push(btos);
        __ b(Done);

        __ bind(notByte);
        __ cmplwi(CCR0, flags, ctos);
        __ bne(CCR0, notChar);
        // ctos
        __ lhzx(R17_tos, obj, off);
        __ push(ctos);
        __ b(Done);

        __ bind(notChar);
        __ cmplwi(CCR0, flags, ztos);
        __ bne(CCR0, notBool);
        // ztos
        __ lbzx(R17_tos, obj, off);
        __ push(ztos);
        __ b(Done);

        __ bind(notBool);
        break;
      }

    case Bytecodes::_ldc2_w:
      {
        Label notLong, notDouble;
        __ cmplwi(CCR0, flags, ltos);
        __ bne(CCR0, notLong);
        // ltos
        __ ldx(R17_tos, obj, off);
        __ push(ltos);
        __ b(Done);

        __ bind(notLong);
        __ cmplwi(CCR0, flags, dtos);
        __ bne(CCR0, notDouble);
        // dtos
        __ lfdx(F15_ftos, obj, off);
        __ push(dtos);
        __ b(Done);

        __ bind(notDouble);
        break;
      }

    default:
      ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload pair into fast_iload2;
  // iload,caload pair into fast_icaload.
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable of type long from the locals area to the TOS cache register.
// The local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because Lbcp points to wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: array
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs && rc == may_rewrite) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

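// iastore above and the lastore/fastore/dastore variants below share one
// pattern: the value arrives in the tos register, the index is popped
// explicitly (pop_i), and index_check pops the array oop, bounds-checks the
// index (branching to the ArrayIndexOutOfBoundsException handler on
// failure), and leaves the scaled element address in Rstore_addr.
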
void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack and...
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31;    // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  __ pop_ptr(Rarray);
  // tos: val

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(Rscratch, Rarray);
  __ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch);
  int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit());
  __ testbitdi(CCR0, R0, Rscratch, diffbit);
  Label L_skip;
  __ bfalse(CCR0, L_skip);
  __ andi(R17_tos, R17_tos, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ index_check_without_pop(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,      R15_esp); // load c
  __ ld(Ra, Interpreter::stackElementSize * 3,  R15_esp); // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2,  R15_esp); // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize,     R15_esp); // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,      R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2,  R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3,  R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3,  R15_esp);
  __ ld(Rd, Interpreter::stackElementSize,      R15_esp);
  __ std(Rb, Interpreter::stackElementSize,     R15_esp); // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4,  R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2,  R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,      R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2,  R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // tos = number of bits to shift
  // Rscratch = value to shift
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:  __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

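// idiv/ldiv below fold the three special divisors {-1, 0, +1} into a single
// unsigned compare: divisor + 1 is unsigned <= 2 exactly for those values
// (-1 -> 0, 0 -> 1, +1 -> 2). The special-case path then throws on 0 and
// otherwise multiplies by the +/-1 divisor; keeping -1 off the normal path
// is also what makes the divw/divd there safe from the undefined
// min_jint / -1 (resp. min_jlong / -1) case.
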
void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

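// Note on the shift templates: rldicl(dst, src, 0, 64-n) clears all but the
// low n bits, so the shift count is masked to 5 bits in iop2's 32-bit shifts
// and to 6 bits in lshl/lshr/lushr above. This implements the JVM spec's
// "shift distance mod 32 / mod 64" semantics before the count ever reaches
// slw/sraw/srw or sld/srad/srd.
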
void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);           // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
      // fall through
    case Bytecodes::_l2d:
      __ move_l_to_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ move_l_to_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ move_l_to_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NaN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ move_d_to_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NaN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ move_d_to_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

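// lcmp below (and float_cmp after it) computes the -1/0/+1 result without
// branches, straight from the condition register: after the compare, mfcr
// copies CR into a GPR, where CCR0's lt and gt bits land in bits 32 and 33.
// srawi by 31 smears the lt bit into 0 or -1, srwi by 30 leaves (lt << 1) | gt,
// and or-ing the two yields exactly -1 (less), 0 (equal) or +1 (greater).
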
// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

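// branch() below is also where the interpreter drives tiered compilation and
// on-stack replacement: backward branches bump the backedge counter (in the
// MDO if one exists, otherwise in the MethodCounters), and on overflow
// InterpreterRuntime::frequency_counter_overflow is called. If that returns
// an OSR nmethod that is still in_use, the interpreter frame is migrated off
// the stack and control jumps to the nmethod's OSR entry point.
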
void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in Otos_i.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos, 0, true);
    return;
  }

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  // Bump bytecode pointer by displacement (take the branch).
  __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    Label Lforward;

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        if (UseOnStackReplacement) {
          __ and_(Rscratch3, Rscratch2, Rscratch3);
          __ bne(CCR0, Lforward);
          __ b(Loverflow);
        } else {
          __ b(Lforward);
        }
      }

      // If there's no MDO, increment counter in method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters);
      if (UseOnStackReplacement) {
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
      } else {
        __ b(Lforward);
      }
      __ bind(Loverflow);

      // Notify point for loop, pass branch bytecode.
1817 __ subf(R4_ARG2, Rdisp, R14_bcp); // Compute branch bytecode (previous bcp).
1818 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
1819
1820 // Was an OSR adapter generated?
1821 __ cmpdi(CCR0, R3_RET, 0);
1822 __ beq(CCR0, Lforward);
1823
1824 // Has the nmethod been invalidated already?
1825 __ lbz(R0, nmethod::state_offset(), R3_RET);
1826 __ cmpwi(CCR0, R0, nmethod::in_use);
1827 __ bne(CCR0, Lforward);
1828
1829 // Migrate the interpreter frame off of the stack.
1830 // We can use all registers because we will not return to the interpreter from this point.
1831
1832 // Save nmethod.
1833 const Register osr_nmethod = R31;
1834 __ mr(osr_nmethod, R3_RET);
1835 __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
1836 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
1837 __ reset_last_Java_frame();
1838 // OSR buffer is in ARG1.
1839
1840 // Remove the interpreter frame.
1841 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
1842
1843 // Jump to the osr code.
1844 __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
1845 __ mtlr(R0);
1846 __ mtctr(R11_scratch1);
1847 __ bctr();
1848
1849 } else {
1850
1851 const Register invoke_ctr = Rscratch1;
1852 // Update the backedge counter separately from the invocation counter.
1853 __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);
1854
1855 if (ProfileInterpreter) {
1856 __ test_invocation_counter_for_mdp(invoke_ctr, R4_counters, Rscratch2, Lforward);
1857 if (UseOnStackReplacement) {
1858 __ test_backedge_count_for_osr(bumped_count, R4_counters, R14_bcp, Rdisp, Rscratch2);
1859 }
1860 } else {
1861 if (UseOnStackReplacement) {
1862 __ test_backedge_count_for_osr(invoke_ctr, R4_counters, R14_bcp, Rdisp, Rscratch2);
1863 }
1864 }
1865 }
1866
1867 __ bind(Lforward);
1868 }
1869 __ dispatch_next(vtos, 0, true);
1870 }
1871
1872 // Helper function for if_cmp* methods below.
1873 // Factored out common compare and branch code.
1874 void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
1875 Label Lnot_taken;
1876 // Note: cc is the condition under which the bytecode branch is taken. We
1877 // branch to Lnot_taken on the inverted condition and fall through if cc holds.
1878
1879 if (is_jint) {
1880 if (cmp0) {
1881 __ cmpwi(CCR0, Rfirst, 0);
1882 } else {
1883 __ cmpw(CCR0, Rfirst, Rsecond);
1884 }
1885 } else {
1886 if (cmp0) {
1887 __ cmpdi(CCR0, Rfirst, 0);
1888 } else {
1889 __ cmpd(CCR0, Rfirst, Rsecond);
1890 }
1891 }
1892 branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);
1893
1894 // Condition holds => take the bytecode branch!
1895 branch(false, false);
1896
1897 // Condition does not hold => continue with the next bytecode.
1898 __ align(32, 12);
1899 __ bind(Lnot_taken);
1900 __ profile_not_taken_branch(Rscratch1, Rscratch2);
1901 }
1902
1903 // Compare integer values with zero and fall through if CC holds, branch away otherwise.
1904 void TemplateTable::if_0cmp(Condition cc) {
1905 transition(itos, vtos);
1906
1907 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
1908 }
1909
1910 // Compare integer values and fall through if CC holds, branch away otherwise.
1911 // 1912 // Interface: 1913 // - Rfirst: First operand (older stack value) 1914 // - tos: Second operand (younger stack value) 1915 void TemplateTable::if_icmp(Condition cc) { 1916 transition(itos, vtos); 1917 1918 const Register Rfirst = R0, 1919 Rsecond = R17_tos; 1920 1921 __ pop_i(Rfirst); 1922 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false); 1923 } 1924 1925 void TemplateTable::if_nullcmp(Condition cc) { 1926 transition(atos, vtos); 1927 1928 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true); 1929 } 1930 1931 void TemplateTable::if_acmp(Condition cc) { 1932 transition(atos, vtos); 1933 1934 const Register Rfirst = R0, 1935 Rsecond = R17_tos; 1936 1937 __ pop_ptr(Rfirst); 1938 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false); 1939 } 1940 1941 void TemplateTable::ret() { 1942 locals_index(R11_scratch1); 1943 __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1); 1944 1945 __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2); 1946 1947 __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method); 1948 __ add(R11_scratch1, R17_tos, R11_scratch1); 1949 __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset())); 1950 __ dispatch_next(vtos, 0, true); 1951 } 1952 1953 void TemplateTable::wide_ret() { 1954 transition(vtos, vtos); 1955 1956 const Register Rindex = R3_ARG1, 1957 Rscratch1 = R11_scratch1, 1958 Rscratch2 = R12_scratch2; 1959 1960 locals_index_wide(Rindex); 1961 __ load_local_ptr(R17_tos, R17_tos, Rindex); 1962 __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2); 1963 // Tos now contains the bci, compute the bcp from that. 1964 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method); 1965 __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset())); 1966 __ add(R14_bcp, Rscratch1, Rscratch2); 1967 __ dispatch_next(vtos, 0, true); 1968 } 1969 1970 void TemplateTable::tableswitch() { 1971 transition(itos, vtos); 1972 1973 Label Ldispatch, Ldefault_case; 1974 Register Rlow_byte = R3_ARG1, 1975 Rindex = Rlow_byte, 1976 Rhigh_byte = R4_ARG2, 1977 Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset 1978 Rscratch1 = R11_scratch1, 1979 Rscratch2 = R12_scratch2, 1980 Roffset = R6_ARG4; 1981 1982 // Align bcp. 1983 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1984 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1985 1986 // Load lo & hi. 1987 __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1988 __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned); 1989 1990 // Check for default case (=index outside [low,high]). 1991 __ cmpw(CCR0, R17_tos, Rlow_byte); 1992 __ cmpw(CCR1, R17_tos, Rhigh_byte); 1993 __ blt(CCR0, Ldefault_case); 1994 __ bgt(CCR1, Ldefault_case); 1995 1996 // Lookup dispatch offset. 
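// Sketch of the table layout after alignment (all slots 4 bytes, big-endian):
//   Rdef_offset_addr -> | default | low | high | offset[0] | offset[1] | ...
// so the entry for key k lives at byte offset (k - low) * 4 + 3 * BytesPerInt,
// which is exactly what the sub/sldi/addi sequence below computes.
// Worked example: low = 5, key = 7 => index 2 => 2 * 4 + 12 = 20 bytes.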
1997 __ sub(Rindex, R17_tos, Rlow_byte); 1998 __ extsw(Rindex, Rindex); 1999 __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2); 2000 __ sldi(Rindex, Rindex, LogBytesPerInt); 2001 __ addi(Rindex, Rindex, 3 * BytesPerInt); 2002 #if defined(VM_LITTLE_ENDIAN) 2003 __ lwbrx(Roffset, Rdef_offset_addr, Rindex); 2004 __ extsw(Roffset, Roffset); 2005 #else 2006 __ lwax(Roffset, Rdef_offset_addr, Rindex); 2007 #endif 2008 __ b(Ldispatch); 2009 2010 __ bind(Ldefault_case); 2011 __ profile_switch_default(Rhigh_byte, Rscratch1); 2012 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 2013 2014 __ bind(Ldispatch); 2015 2016 __ add(R14_bcp, Roffset, R14_bcp); 2017 __ dispatch_next(vtos, 0, true); 2018 } 2019 2020 void TemplateTable::lookupswitch() { 2021 transition(itos, itos); 2022 __ stop("lookupswitch bytecode should have been rewritten"); 2023 } 2024 2025 // Table switch using linear search through cases. 2026 // Bytecode stream format: 2027 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 2028 // Note: Everything is big-endian format here. 2029 void TemplateTable::fast_linearswitch() { 2030 transition(itos, vtos); 2031 2032 Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case; 2033 Register Rcount = R3_ARG1, 2034 Rcurrent_pair = R4_ARG2, 2035 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset. 2036 Roffset = R31, // Might need to survive C call. 2037 Rvalue = R12_scratch2, 2038 Rscratch = R11_scratch1, 2039 Rcmp_value = R17_tos; 2040 2041 // Align bcp. 2042 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 2043 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 2044 2045 // Setup loop counter and limit. 2046 __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 2047 __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair. 2048 2049 __ mtctr(Rcount); 2050 __ cmpwi(CCR0, Rcount, 0); 2051 __ bne(CCR0, Lloop_entry); 2052 2053 // Default case 2054 __ bind(Ldefault_case); 2055 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 2056 if (ProfileInterpreter) { 2057 __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */); 2058 } 2059 __ b(Lcontinue_execution); 2060 2061 // Next iteration 2062 __ bind(Lsearch_loop); 2063 __ bdz(Ldefault_case); 2064 __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt); 2065 __ bind(Lloop_entry); 2066 __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned); 2067 __ cmpw(CCR0, Rvalue, Rcmp_value); 2068 __ bne(CCR0, Lsearch_loop); 2069 2070 // Found, load offset. 2071 __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed); 2072 // Calculate case index and profile 2073 __ mfctr(Rcurrent_pair); 2074 if (ProfileInterpreter) { 2075 __ sub(Rcurrent_pair, Rcount, Rcurrent_pair); 2076 __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch); 2077 } 2078 2079 __ bind(Lcontinue_execution); 2080 __ add(R14_bcp, Roffset, R14_bcp); 2081 __ dispatch_next(vtos, 0, true); 2082 } 2083 2084 // Table switch using binary search (value/offset pairs are ordered). 2085 // Bytecode stream format: 2086 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 2087 // Note: Everything is big-endian format here. 
So on little-endian machines, we have to byte-reverse the offset, the count, and the compared value.
2088 void TemplateTable::fast_binaryswitch() {
2089
2090 transition(itos, vtos);
2091 // Implementation using the following core algorithm: (copied from Intel)
2092 //
2093 // int binary_search(int key, LookupswitchPair* array, int n) {
2094 // // Binary search according to "Methodik des Programmierens" by
2095 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2096 // int i = 0;
2097 // int j = n;
2098 // while (i+1 < j) {
2099 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2100 // // with Q: for all i: 0 <= i < n: key < a[i]
2101 // // where a stands for the array and assuming that the (nonexistent)
2102 // // element a[n] is infinitely big.
2103 // int h = (i + j) >> 1;
2104 // // i < h < j
2105 // if (key < array[h].fast_match()) {
2106 // j = h;
2107 // } else {
2108 // i = h;
2109 // }
2110 // }
2111 // // R: a[i] <= key < a[i+1] or Q
2112 // // (i.e., if key is within array, i is the correct index)
2113 // return i;
2114 // }
2115
2116 // register allocation
2117 const Register Rkey = R17_tos; // already set (tosca)
2118 const Register Rarray = R3_ARG1;
2119 const Register Ri = R4_ARG2;
2120 const Register Rj = R5_ARG3;
2121 const Register Rh = R6_ARG4;
2122 const Register Rscratch = R11_scratch1;
2123
2124 const int log_entry_size = 3;
2125 const int entry_size = 1 << log_entry_size;
2126
2127 Label found;
2128
2129 // Find array start.
2130 __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
2131 __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
2132
2133 // initialize i & j
2134 __ li(Ri, 0);
2135 __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
2136
2137 // and start.
2138 Label entry;
2139 __ b(entry);
2140
2141 // binary search loop
2142 { Label loop;
2143 __ bind(loop);
2144 // int h = (i + j) >> 1; (the sum i + j was formed at the loop head below)
2145 __ srdi(Rh, Rh, 1);
2146 // if (key < array[h].fast_match()) {
2147 // j = h;
2148 // } else {
2149 // i = h;
2150 // }
2151 __ sldi(Rscratch, Rh, log_entry_size);
2152 #if defined(VM_LITTLE_ENDIAN)
2153 __ lwbrx(Rscratch, Rscratch, Rarray);
2154 #else
2155 __ lwzx(Rscratch, Rscratch, Rarray);
2156 #endif
2157
2158 // if (key < current value)
2159 // Rj = Rh
2160 // else
2161 // Ri = Rh
2162 Label Lgreater;
2163 __ cmpw(CCR0, Rkey, Rscratch);
2164 __ bge(CCR0, Lgreater);
2165 __ mr(Rj, Rh);
2166 __ b(entry);
2167 __ bind(Lgreater);
2168 __ mr(Ri, Rh);
2169
2170 // while (i+1 < j)
2171 __ bind(entry);
2172 __ addi(Rscratch, Ri, 1);
2173 __ cmpw(CCR0, Rscratch, Rj);
2174 __ add(Rh, Ri, Rj); // Precompute i + j; the shift to h = (i + j) >> 1 happens at the loop top.
2175
2176 __ blt(CCR0, loop);
2177 }
2178
2179 // End of binary search, result index is i (must check again!).
2180 Label default_case;
2181 Label continue_execution;
2182 if (ProfileInterpreter) {
2183 __ mr(Rh, Ri); // Save index in i for profiling.
2184 }
2185 // Ri = address of pair i (match value at offset 0, branch offset at offset BytesPerInt).
2186 __ sldi(Ri, Ri, log_entry_size);
2187 __ add(Ri, Ri, Rarray);
2188 __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
2189
2190 // 'found' (declared above) is taken when the key matches the value of entry i;
2191 // otherwise we fall through to the default case.
2192 __ cmpw(CCR0, Rkey, Rscratch);
2193 __ beq(CCR0, found);
2194 // entry not found -> j = default offset
2195 __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
2196 __ b(default_case);
2197
2198 __ bind(found);
2199 // entry found -> j = offset
2200 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
2201 __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
2202
2203 if (ProfileInterpreter) {
2204 __ b(continue_execution);
2205 }
2206
2207 __ bind(default_case); // fall through (if not profiling)
2208 __ profile_switch_default(Ri, Rscratch);
2209
2210 __ bind(continue_execution);
2211
2212 __ extsw(Rj, Rj);
2213 __ add(R14_bcp, Rj, R14_bcp);
2214 __ dispatch_next(vtos, 0, true);
2215 }
2216
2217 void TemplateTable::_return(TosState state) {
2218 transition(state, state);
2219 assert(_desc->calls_vm(),
2220 "inconsistent calls_vm information"); // call in remove_activation
2221
2222 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2223
2224 Register Rscratch = R11_scratch1,
2225 Rklass = R12_scratch2,
2226 Rklass_flags = Rklass;
2227 Label Lskip_register_finalizer;
2228
2229 // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
2230 assert(state == vtos, "only valid state");
2231 __ ld(R17_tos, 0, R18_locals);
2232
2233 // Load klass of this obj.
2234 __ load_klass(Rklass, R17_tos);
2235 __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
2236 __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
2237 __ bfalse(CCR0, Lskip_register_finalizer);
2238
2239 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
2240
2241 __ align(32, 12);
2242 __ bind(Lskip_register_finalizer);
2243 }
2244
2245 if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
2246 Label no_safepoint;
2247 __ ld(R11_scratch1, in_bytes(Thread::polling_page_offset()), R16_thread);
2248 __ andi_(R11_scratch1, R11_scratch1, SafepointMechanism::poll_bit());
2249 __ beq(CCR0, no_safepoint);
2250 __ push(state);
2251 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
2252 __ pop(state);
2253 __ bind(no_safepoint);
2254 }
2255
2256 // Move the result value into the correct register and remove the stack frame.
2257 __ remove_activation(state, /* throw_monitor_exception */ true);
2258 // Restoration of lr done by remove_activation.
2259 switch (state) {
2260 // Narrow result if state is itos but result type is smaller.
2261 // Need to narrow in the return bytecode rather than in generate_return_entry
2262 // since compiled code callers expect the result to already be narrowed.
2263 case itos: __ narrow(R17_tos); /* fall through */
2264 case ltos:
2265 case atos: __ mr(R3_RET, R17_tos); break;
2266 case ftos:
2267 case dtos: __ fmr(F1_RET, F15_ftos); break;
2268 case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
2269 // to get visible before the reference to the object gets stored anywhere.
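// (Note: a StoreStore barrier suffices here because it orders the
// constructor's field stores before any later store that publishes the
// object reference; this is, roughly, the JMM final-field guarantee.)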
2270 __ membar(Assembler::StoreStore); break;
2271 default : ShouldNotReachHere();
2272 }
2273 __ blr();
2274 }
2275
2276 // ============================================================================
2277 // Constant pool cache access
2278 //
2279 // Memory ordering:
2280 //
2281 // As in the C++ interpreter, we load the fields
2282 // - _indices
2283 // - _f12_oop
2284 // with acquire semantics, because they are inspected to decide whether the
2285 // cache entry is already resolved. We don't want later loads to float above this check.
2286 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2287 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();
2288
2289 // Call into the VM if the call site is not yet resolved.
2290 //
2291 // Input regs:
2292 // - None, all passed regs are outputs.
2293 //
2294 // Returns:
2295 // - Rcache: The constant pool cache entry that contains the resolved result.
2296 //   (There is no separate result register; callers read f1/f2 from Rcache.)
2297 //
2298 // Kills:
2299 // - Rscratch
2300 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2301
2302 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2303 Label Lresolved, Ldone;
2304
2305 Bytecodes::Code code = bytecode();
2306 switch (code) {
2307 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2308 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2309 default: break;
2310 }
2311 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2312 // We are resolved if the indices field contains the current bytecode.
2313 #if defined(VM_LITTLE_ENDIAN)
2314 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2315 #else
2316 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2317 #endif
2318 // Acquire by cmp-br-isync (see below).
2319 __ cmpdi(CCR0, Rscratch, (int)code);
2320 __ beq(CCR0, Lresolved);
2321
2322 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2323 __ li(R4_ARG2, code);
2324 __ call_VM(noreg, entry, R4_ARG2, true);
2325
2326 // Update registers with resolved info.
2327 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2328 __ b(Ldone);
2329
2330 __ bind(Lresolved);
2331 __ isync(); // Order load wrt. succeeding loads.
2332 __ bind(Ldone);
2333 }
2334
2335 // Load the constant pool cache entry at field accesses into registers.
2336 // The Rcache and Rindex registers must be set before call.
2337 // Input:
2338 // - Rcache, Rindex
2339 // Output:
2340 // - Robj, Roffset, Rflags
2341 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2342 Register Rcache,
2343 Register Rindex /* unused on PPC64 */,
2344 Register Roffset,
2345 Register Rflags,
2346 bool is_static = false) {
2347 assert_different_registers(Rcache, Rflags, Roffset);
2348 // assert(Rindex == noreg, "parameter not used on PPC64");
2349
2350 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2351 __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
2352 __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
2353 if (is_static) {
2354 __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
2355 __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
2356 __ resolve_oop_handle(Robj);
2357 // Acquire not needed here. Following access has an address dependency on this value.
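// (PPC honors address dependencies: a load whose address is computed from
// the result of a prior load cannot be reordered before that load, so this
// klass -> mirror -> oop chain needs no explicit acquire barrier.)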
Following access has an address dependency on this value. 2358 } 2359 } 2360 2361 // Load the constant pool cache entry at invokes into registers. 2362 // Resolve if necessary. 2363 2364 // Input Registers: 2365 // - None, bcp is used, though 2366 // 2367 // Return registers: 2368 // - Rmethod (f1 field or f2 if invokevirtual) 2369 // - Ritable_index (f2 field) 2370 // - Rflags (flags field) 2371 // 2372 // Kills: 2373 // - R21 2374 // 2375 void TemplateTable::load_invoke_cp_cache_entry(int byte_no, 2376 Register Rmethod, 2377 Register Ritable_index, 2378 Register Rflags, 2379 bool is_invokevirtual, 2380 bool is_invokevfinal, 2381 bool is_invokedynamic) { 2382 2383 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2384 // Determine constant pool cache field offsets. 2385 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); 2386 const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset())); 2387 const int flags_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()); 2388 // Access constant pool cache fields. 2389 const int index_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()); 2390 2391 Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP. 2392 2393 if (is_invokevfinal) { 2394 assert(Ritable_index == noreg, "register not used"); 2395 // Already resolved. 2396 __ get_cache_and_index_at_bcp(Rcache, 1); 2397 } else { 2398 resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2)); 2399 } 2400 2401 __ ld(Rmethod, method_offset, Rcache); 2402 __ ld(Rflags, flags_offset, Rcache); 2403 2404 if (Ritable_index != noreg) { 2405 __ ld(Ritable_index, index_offset, Rcache); 2406 } 2407 } 2408 2409 // ============================================================================ 2410 // Field access 2411 2412 // Volatile variables demand their effects be made known to all CPU's 2413 // in order. Store buffers on most chips allow reads & writes to 2414 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode 2415 // without some kind of memory barrier (i.e., it's not sufficient that 2416 // the interpreter does not reorder volatile references, the hardware 2417 // also must not reorder them). 2418 // 2419 // According to the new Java Memory Model (JMM): 2420 // (1) All volatiles are serialized wrt to each other. ALSO reads & 2421 // writes act as aquire & release, so: 2422 // (2) A read cannot let unrelated NON-volatile memory refs that 2423 // happen after the read float up to before the read. It's OK for 2424 // non-volatile memory refs that happen before the volatile read to 2425 // float down below it. 2426 // (3) Similar a volatile write cannot let unrelated NON-volatile 2427 // memory refs that happen BEFORE the write float down to after the 2428 // write. It's OK for non-volatile memory refs that happen after the 2429 // volatile write to float up before it. 2430 // 2431 // We only put in barriers around volatile refs (they are expensive), 2432 // not _between_ memory refs (that would require us to track the 2433 // flavor of the previous memory refs). Requirements (2) and (3) 2434 // require some barriers before volatile stores and after volatile 2435 // loads. These nearly cover requirement (1) but miss the 2436 // volatile-store-volatile-load case. This final case is placed after 2437 // volatile-stores although it could just as well go before 2438 // volatile-loads. 
2439
2440 // The cache and index registers are expected to be set before the call.
2441 // Correct values of the cache and index registers are preserved.
2442 // Kills:
2443 // Rcache (if has_tos)
2444 // Rscratch
2445 void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
2446
2447 assert_different_registers(Rcache, Rscratch);
2448
2449 if (JvmtiExport::can_post_field_access()) {
2450 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2451 Label Lno_field_access_post;
2452
2453 // Check if posting of field access events is enabled.
2454 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
2455 __ lwz(Rscratch, offs, Rscratch);
2456
2457 __ cmpwi(CCR0, Rscratch, 0);
2458 __ beq(CCR0, Lno_field_access_post);
2459
2460 // Post access enabled - do it!
2461 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2462 if (is_static) {
2463 __ li(R17_tos, 0);
2464 } else {
2465 if (has_tos) {
2466 // The fast bytecode versions have the obj ptr in a register.
2467 // Thus, save the object pointer before call_VM() clobbers it:
2468 // put the object on the tos where GC wants it.
2469 __ push_ptr(R17_tos);
2470 } else {
2471 // Load top of stack (do not pop the value off the stack).
2472 __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
2473 }
2474 __ verify_oop(R17_tos);
2475 }
2476 // tos: object pointer or NULL if static
2477 // cache: cache entry pointer
2478 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
2479 if (!is_static && has_tos) {
2480 // Restore object pointer.
2481 __ pop_ptr(R17_tos);
2482 __ verify_oop(R17_tos);
2483 } else {
2484 // Cache is still needed to get class or obj.
2485 __ get_cache_and_index_at_bcp(Rcache, 1);
2486 }
2487
2488 __ align(32, 12);
2489 __ bind(Lno_field_access_post);
2490 }
2491 }
2492
2493 // kills R11_scratch1
2494 void TemplateTable::pop_and_check_object(Register Roop) {
2495 Register Rtmp = R11_scratch1;
2496
2497 assert_different_registers(Rtmp, Roop);
2498 __ pop_ptr(Roop);
2499 // For field access must check obj.
2500 __ null_check_throw(Roop, -1, Rtmp);
2501 __ verify_oop(Roop);
2502 }
2503
2504 // PPC64: implement volatile loads as fence-load-acquire.
2505 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2506 transition(vtos, vtos);
2507
2508 Label Lacquire, Lisync;
2509
2510 const Register Rcache = R3_ARG1,
2511 Rclass_or_obj = R22_tmp2,
2512 Roffset = R23_tmp3,
2513 Rflags = R31,
2514 Rbtable = R5_ARG3,
2515 Rbc = R6_ARG4,
2516 Rscratch = R12_scratch2;
2517
2518 static address field_branch_table[number_of_states],
2519 static_branch_table[number_of_states];
2520
2521 address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table;
2522
2523 // Get field offset.
2524 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2525
2526 // JVMTI support
2527 jvmti_post_field_access(Rcache, Rscratch, is_static, false);
2528
2529 // Load after possible GC.
2530 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2531
2532 // Load pointer to branch table.
2533 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2534
2535 // Get volatile flag.
2536 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2537 // Note: sync is needed before volatile load on PPC64.
2538
2539 // Check field type.
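// The tos state is a small bitfield within Rflags. The rldicl below is
// equivalent to (illustrative C):
//   tos_state = (flags >> tos_state_shift) & ((1 << tos_state_bits) - 1);
// i.e. rotate the field down to bit 0 and clear everything above it.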
2540 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2541 2542 #ifdef ASSERT 2543 Label LFlagInvalid; 2544 __ cmpldi(CCR0, Rflags, number_of_states); 2545 __ bge(CCR0, LFlagInvalid); 2546 #endif 2547 2548 // Load from branch table and dispatch (volatile case: one instruction ahead). 2549 __ sldi(Rflags, Rflags, LogBytesPerWord); 2550 __ cmpwi(CCR6, Rscratch, 1); // Volatile? 2551 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2552 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0. 2553 } 2554 __ ldx(Rbtable, Rbtable, Rflags); 2555 2556 // Get the obj from stack. 2557 if (!is_static) { 2558 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2559 } else { 2560 __ verify_oop(Rclass_or_obj); 2561 } 2562 2563 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2564 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2565 } 2566 __ mtctr(Rbtable); 2567 __ bctr(); 2568 2569 #ifdef ASSERT 2570 __ bind(LFlagInvalid); 2571 __ stop("got invalid flag", 0x654); 2572 #endif 2573 2574 if (!is_static && rc == may_not_rewrite) { 2575 // We reuse the code from is_static. It's jumped to via the table above. 2576 return; 2577 } 2578 2579 #ifdef ASSERT 2580 // __ bind(Lvtos); 2581 address pc_before_fence = __ pc(); 2582 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2583 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2584 assert(branch_table[vtos] == 0, "can't compute twice"); 2585 branch_table[vtos] = __ pc(); // non-volatile_entry point 2586 __ stop("vtos unexpected", 0x655); 2587 #endif 2588 2589 __ align(32, 28, 28); // Align load. 2590 // __ bind(Ldtos); 2591 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2592 assert(branch_table[dtos] == 0, "can't compute twice"); 2593 branch_table[dtos] = __ pc(); // non-volatile_entry point 2594 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 2595 __ push(dtos); 2596 if (!is_static && rc == may_rewrite) { 2597 patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch); 2598 } 2599 { 2600 Label acquire_double; 2601 __ beq(CCR6, acquire_double); // Volatile? 2602 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2603 2604 __ bind(acquire_double); 2605 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2606 __ beq_predict_taken(CCR0, Lisync); 2607 __ b(Lisync); // In case of NAN. 2608 } 2609 2610 __ align(32, 28, 28); // Align load. 2611 // __ bind(Lftos); 2612 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2613 assert(branch_table[ftos] == 0, "can't compute twice"); 2614 branch_table[ftos] = __ pc(); // non-volatile_entry point 2615 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 2616 __ push(ftos); 2617 if (!is_static && rc == may_rewrite) { 2618 patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); 2619 } 2620 { 2621 Label acquire_float; 2622 __ beq(CCR6, acquire_float); // Volatile? 2623 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2624 2625 __ bind(acquire_float); 2626 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2627 __ beq_predict_taken(CCR0, Lisync); 2628 __ b(Lisync); // In case of NAN. 2629 } 2630 2631 __ align(32, 28, 28); // Align load. 2632 // __ bind(Litos); 2633 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 
2634 assert(branch_table[itos] == 0, "can't compute twice"); 2635 branch_table[itos] = __ pc(); // non-volatile_entry point 2636 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2637 __ push(itos); 2638 if (!is_static && rc == may_rewrite) { 2639 patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch); 2640 } 2641 __ beq(CCR6, Lacquire); // Volatile? 2642 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2643 2644 __ align(32, 28, 28); // Align load. 2645 // __ bind(Lltos); 2646 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2647 assert(branch_table[ltos] == 0, "can't compute twice"); 2648 branch_table[ltos] = __ pc(); // non-volatile_entry point 2649 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2650 __ push(ltos); 2651 if (!is_static && rc == may_rewrite) { 2652 patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch); 2653 } 2654 __ beq(CCR6, Lacquire); // Volatile? 2655 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2656 2657 __ align(32, 28, 28); // Align load. 2658 // __ bind(Lbtos); 2659 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2660 assert(branch_table[btos] == 0, "can't compute twice"); 2661 branch_table[btos] = __ pc(); // non-volatile_entry point 2662 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2663 __ extsb(R17_tos, R17_tos); 2664 __ push(btos); 2665 if (!is_static && rc == may_rewrite) { 2666 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2667 } 2668 __ beq(CCR6, Lacquire); // Volatile? 2669 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2670 2671 __ align(32, 28, 28); // Align load. 2672 // __ bind(Lztos); (same code as btos) 2673 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2674 assert(branch_table[ztos] == 0, "can't compute twice"); 2675 branch_table[ztos] = __ pc(); // non-volatile_entry point 2676 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2677 __ push(ztos); 2678 if (!is_static && rc == may_rewrite) { 2679 // use btos rewriting, no truncating to t/f bit is needed for getfield. 2680 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2681 } 2682 __ beq(CCR6, Lacquire); // Volatile? 2683 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2684 2685 __ align(32, 28, 28); // Align load. 2686 // __ bind(Lctos); 2687 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2688 assert(branch_table[ctos] == 0, "can't compute twice"); 2689 branch_table[ctos] = __ pc(); // non-volatile_entry point 2690 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 2691 __ push(ctos); 2692 if (!is_static && rc == may_rewrite) { 2693 patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch); 2694 } 2695 __ beq(CCR6, Lacquire); // Volatile? 2696 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2697 2698 __ align(32, 28, 28); // Align load. 2699 // __ bind(Lstos); 2700 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2701 assert(branch_table[stos] == 0, "can't compute twice"); 2702 branch_table[stos] = __ pc(); // non-volatile_entry point 2703 __ lhax(R17_tos, Rclass_or_obj, Roffset); 2704 __ push(stos); 2705 if (!is_static && rc == may_rewrite) { 2706 patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch); 2707 } 2708 __ beq(CCR6, Lacquire); // Volatile? 2709 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2710 2711 __ align(32, 28, 28); // Align load. 
2712 // __ bind(Latos);
2713 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2714 assert(branch_table[atos] == 0, "can't compute twice");
2715 branch_table[atos] = __ pc(); // non-volatile_entry point
2716 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2717 __ verify_oop(R17_tos);
2718 __ push(atos);
2719 //__ dcbt(R17_tos); // prefetch
2720 if (!is_static && rc == may_rewrite) {
2721 patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2722 }
2723 __ beq(CCR6, Lacquire); // Volatile?
2724 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2725
2726 __ align(32, 12);
2727 __ bind(Lacquire);
2728 __ twi_0(R17_tos);
2729 __ bind(Lisync);
2730 __ isync(); // acquire
2731
2732 #ifdef ASSERT
2733 for (int i = 0; i<number_of_states; ++i) {
2734 assert(branch_table[i], "get initialization");
2735 //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2736 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2737 }
2738 #endif
2739 }
2740
2741 void TemplateTable::getfield(int byte_no) {
2742 getfield_or_static(byte_no, false);
2743 }
2744
2745 void TemplateTable::nofast_getfield(int byte_no) {
2746 getfield_or_static(byte_no, false, may_not_rewrite);
2747 }
2748
2749 void TemplateTable::getstatic(int byte_no) {
2750 getfield_or_static(byte_no, true);
2751 }
2752
2753 // The cache and index registers are expected to be set before the call.
2754 // The function may destroy various registers, just not the cache and index registers.
2755 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2756
2757 assert_different_registers(Rcache, Rscratch, R6_ARG4);
2758
2759 if (JvmtiExport::can_post_field_modification()) {
2760 Label Lno_field_mod_post;
2761
2762 // Check if posting of field modification events is enabled.
2763 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2764 __ lwz(Rscratch, offs, Rscratch);
2765
2766 __ cmpwi(CCR0, Rscratch, 0);
2767 __ beq(CCR0, Lno_field_mod_post);
2768
2769 // Do the post.
2770 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2771 const Register Robj = Rscratch;
2772
2773 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2774 if (is_static) {
2775 // Life is simple. Null out the object pointer.
2776 __ li(Robj, 0);
2777 } else {
2778 // In case of the fast versions, the value lives in registers => put it back on the tos.
2779 int offs = Interpreter::expr_offset_in_bytes(0);
2780 Register base = R15_esp;
2781 switch(bytecode()) {
2782 case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break;
2783 case Bytecodes::_fast_iputfield: // Fall through
2784 case Bytecodes::_fast_bputfield: // Fall through
2785 case Bytecodes::_fast_zputfield: // Fall through
2786 case Bytecodes::_fast_cputfield: // Fall through
2787 case Bytecodes::_fast_sputfield: __ push_i(); offs+= Interpreter::stackElementSize; break;
2788 case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break;
2789 case Bytecodes::_fast_fputfield: __ push_f(); offs+= Interpreter::stackElementSize; break;
2790 case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break;
2791 default: {
2792 offs = 0;
2793 base = Robj;
2794 const Register Rflags = Robj;
2795 Label is_one_slot;
2796 // Life is harder. The stack holds the value on top, followed by the
2797 // object.
We don't know the size of the value, though; it could be 2798 // one or two words depending on its type. As a result, we must find 2799 // the type to determine where the object is. 2800 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian 2801 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2802 2803 __ cmpwi(CCR0, Rflags, ltos); 2804 __ cmpwi(CCR1, Rflags, dtos); 2805 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1)); 2806 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); 2807 __ beq(CCR0, is_one_slot); 2808 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2)); 2809 __ bind(is_one_slot); 2810 break; 2811 } 2812 } 2813 __ ld(Robj, offs, base); 2814 __ verify_oop(Robj); 2815 } 2816 2817 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0)); 2818 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4); 2819 __ get_cache_and_index_at_bcp(Rcache, 1); 2820 2821 // In case of the fast versions, value lives in registers => put it back on tos. 2822 switch(bytecode()) { 2823 case Bytecodes::_fast_aputfield: __ pop_ptr(); break; 2824 case Bytecodes::_fast_iputfield: // Fall through 2825 case Bytecodes::_fast_bputfield: // Fall through 2826 case Bytecodes::_fast_zputfield: // Fall through 2827 case Bytecodes::_fast_cputfield: // Fall through 2828 case Bytecodes::_fast_sputfield: __ pop_i(); break; 2829 case Bytecodes::_fast_lputfield: __ pop_l(); break; 2830 case Bytecodes::_fast_fputfield: __ pop_f(); break; 2831 case Bytecodes::_fast_dputfield: __ pop_d(); break; 2832 default: break; // Nothin' to do. 2833 } 2834 2835 __ align(32, 12); 2836 __ bind(Lno_field_mod_post); 2837 } 2838 } 2839 2840 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release). 2841 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 2842 Label Lvolatile; 2843 2844 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2845 Rclass_or_obj = R31, // Needs to survive C call. 2846 Roffset = R22_tmp2, // Needs to survive C call. 2847 Rflags = R3_ARG1, 2848 Rbtable = R4_ARG2, 2849 Rscratch = R11_scratch1, 2850 Rscratch2 = R12_scratch2, 2851 Rscratch3 = R6_ARG4, 2852 Rbc = Rscratch3; 2853 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2854 2855 static address field_rw_branch_table[number_of_states], 2856 field_norw_branch_table[number_of_states], 2857 static_branch_table[number_of_states]; 2858 2859 address* branch_table = is_static ? static_branch_table : 2860 (rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table); 2861 2862 // Stack (grows up): 2863 // value 2864 // obj 2865 2866 // Load the field offset. 2867 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2868 jvmti_post_field_mod(Rcache, Rscratch, is_static); 2869 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2870 2871 // Load pointer to branch table. 2872 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2873 2874 // Get volatile flag. 2875 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2876 2877 // Check the field type. 
2878 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2879 2880 #ifdef ASSERT 2881 Label LFlagInvalid; 2882 __ cmpldi(CCR0, Rflags, number_of_states); 2883 __ bge(CCR0, LFlagInvalid); 2884 #endif 2885 2886 // Load from branch table and dispatch (volatile case: one instruction ahead). 2887 __ sldi(Rflags, Rflags, LogBytesPerWord); 2888 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2889 __ cmpwi(CR_is_vol, Rscratch, 1); // Volatile? 2890 } 2891 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0. 2892 __ ldx(Rbtable, Rbtable, Rflags); 2893 2894 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2895 __ mtctr(Rbtable); 2896 __ bctr(); 2897 2898 #ifdef ASSERT 2899 __ bind(LFlagInvalid); 2900 __ stop("got invalid flag", 0x656); 2901 2902 // __ bind(Lvtos); 2903 address pc_before_release = __ pc(); 2904 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2905 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2906 assert(branch_table[vtos] == 0, "can't compute twice"); 2907 branch_table[vtos] = __ pc(); // non-volatile_entry point 2908 __ stop("vtos unexpected", 0x657); 2909 #endif 2910 2911 __ align(32, 28, 28); // Align pop. 2912 // __ bind(Ldtos); 2913 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2914 assert(branch_table[dtos] == 0, "can't compute twice"); 2915 branch_table[dtos] = __ pc(); // non-volatile_entry point 2916 __ pop(dtos); 2917 if (!is_static) { 2918 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2919 } 2920 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2921 if (!is_static && rc == may_rewrite) { 2922 patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); 2923 } 2924 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2925 __ beq(CR_is_vol, Lvolatile); // Volatile? 2926 } 2927 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2928 2929 __ align(32, 28, 28); // Align pop. 2930 // __ bind(Lftos); 2931 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2932 assert(branch_table[ftos] == 0, "can't compute twice"); 2933 branch_table[ftos] = __ pc(); // non-volatile_entry point 2934 __ pop(ftos); 2935 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2936 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2937 if (!is_static && rc == may_rewrite) { 2938 patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); 2939 } 2940 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2941 __ beq(CR_is_vol, Lvolatile); // Volatile? 2942 } 2943 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2944 2945 __ align(32, 28, 28); // Align pop. 2946 // __ bind(Litos); 2947 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2948 assert(branch_table[itos] == 0, "can't compute twice"); 2949 branch_table[itos] = __ pc(); // non-volatile_entry point 2950 __ pop(itos); 2951 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2952 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2953 if (!is_static && rc == may_rewrite) { 2954 patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); 2955 } 2956 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2957 __ beq(CR_is_vol, Lvolatile); // Volatile? 
2958 } 2959 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2960 2961 __ align(32, 28, 28); // Align pop. 2962 // __ bind(Lltos); 2963 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2964 assert(branch_table[ltos] == 0, "can't compute twice"); 2965 branch_table[ltos] = __ pc(); // non-volatile_entry point 2966 __ pop(ltos); 2967 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2968 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2969 if (!is_static && rc == may_rewrite) { 2970 patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); 2971 } 2972 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2973 __ beq(CR_is_vol, Lvolatile); // Volatile? 2974 } 2975 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2976 2977 __ align(32, 28, 28); // Align pop. 2978 // __ bind(Lbtos); 2979 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2980 assert(branch_table[btos] == 0, "can't compute twice"); 2981 branch_table[btos] = __ pc(); // non-volatile_entry point 2982 __ pop(btos); 2983 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2984 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2985 if (!is_static && rc == may_rewrite) { 2986 patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); 2987 } 2988 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2989 __ beq(CR_is_vol, Lvolatile); // Volatile? 2990 } 2991 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2992 2993 __ align(32, 28, 28); // Align pop. 2994 // __ bind(Lztos); 2995 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2996 assert(branch_table[ztos] == 0, "can't compute twice"); 2997 branch_table[ztos] = __ pc(); // non-volatile_entry point 2998 __ pop(ztos); 2999 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 3000 __ andi(R17_tos, R17_tos, 0x1); 3001 __ stbx(R17_tos, Rclass_or_obj, Roffset); 3002 if (!is_static && rc == may_rewrite) { 3003 patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no); 3004 } 3005 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3006 __ beq(CR_is_vol, Lvolatile); // Volatile? 3007 } 3008 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3009 3010 __ align(32, 28, 28); // Align pop. 3011 // __ bind(Lctos); 3012 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 3013 assert(branch_table[ctos] == 0, "can't compute twice"); 3014 branch_table[ctos] = __ pc(); // non-volatile_entry point 3015 __ pop(ctos); 3016 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.. 3017 __ sthx(R17_tos, Rclass_or_obj, Roffset); 3018 if (!is_static && rc == may_rewrite) { 3019 patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); 3020 } 3021 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3022 __ beq(CR_is_vol, Lvolatile); // Volatile? 3023 } 3024 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3025 3026 __ align(32, 28, 28); // Align pop. 3027 // __ bind(Lstos); 3028 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 3029 assert(branch_table[stos] == 0, "can't compute twice"); 3030 branch_table[stos] = __ pc(); // non-volatile_entry point 3031 __ pop(stos); 3032 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 
3033 __ sthx(R17_tos, Rclass_or_obj, Roffset); 3034 if (!is_static && rc == may_rewrite) { 3035 patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); 3036 } 3037 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3038 __ beq(CR_is_vol, Lvolatile); // Volatile? 3039 } 3040 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3041 3042 __ align(32, 28, 28); // Align pop. 3043 // __ bind(Latos); 3044 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 3045 assert(branch_table[atos] == 0, "can't compute twice"); 3046 branch_table[atos] = __ pc(); // non-volatile_entry point 3047 __ pop(atos); 3048 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1 3049 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 3050 if (!is_static && rc == may_rewrite) { 3051 patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); 3052 } 3053 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3054 __ beq(CR_is_vol, Lvolatile); // Volatile? 3055 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3056 3057 __ align(32, 12); 3058 __ bind(Lvolatile); 3059 __ fence(); 3060 } 3061 // fallthru: __ b(Lexit); 3062 3063 #ifdef ASSERT 3064 for (int i = 0; i<number_of_states; ++i) { 3065 assert(branch_table[i], "put initialization"); 3066 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 3067 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 3068 } 3069 #endif 3070 } 3071 3072 void TemplateTable::putfield(int byte_no) { 3073 putfield_or_static(byte_no, false); 3074 } 3075 3076 void TemplateTable::nofast_putfield(int byte_no) { 3077 putfield_or_static(byte_no, false, may_not_rewrite); 3078 } 3079 3080 void TemplateTable::putstatic(int byte_no) { 3081 putfield_or_static(byte_no, true); 3082 } 3083 3084 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job. 3085 void TemplateTable::jvmti_post_fast_field_mod() { 3086 __ should_not_reach_here(); 3087 } 3088 3089 void TemplateTable::fast_storefield(TosState state) { 3090 transition(state, vtos); 3091 3092 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 3093 Rclass_or_obj = R31, // Needs to survive C call. 3094 Roffset = R22_tmp2, // Needs to survive C call. 3095 Rflags = R3_ARG1, 3096 Rscratch = R11_scratch1, 3097 Rscratch2 = R12_scratch2, 3098 Rscratch3 = R4_ARG2; 3099 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 3100 3101 // Constant pool already resolved => Load flags and offset of field. 3102 __ get_cache_and_index_at_bcp(Rcache, 1); 3103 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */); 3104 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3105 3106 // Get the obj and the final store addr. 3107 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 3108 3109 // Get volatile flag. 3110 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3111 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); } 3112 { 3113 Label LnotVolatile; 3114 __ beq(CCR0, LnotVolatile); 3115 __ release(); 3116 __ align(32, 12); 3117 __ bind(LnotVolatile); 3118 } 3119 3120 // Do the store and fencing. 
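// For a volatile field the sequence is: release (lwsync), emitted above,
// before the store, and a trailing sync after the store unless
// support_IRIW_for_not_multiple_copy_atomic_cpu is set (in which case the
// volatile-load path carries the sync; see the CR_is_vol check below).
// Non-volatile stores skip both barriers.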
3121 switch(bytecode()) { 3122 case Bytecodes::_fast_aputfield: 3123 // Store into the field. 3124 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 3125 break; 3126 3127 case Bytecodes::_fast_iputfield: 3128 __ stwx(R17_tos, Rclass_or_obj, Roffset); 3129 break; 3130 3131 case Bytecodes::_fast_lputfield: 3132 __ stdx(R17_tos, Rclass_or_obj, Roffset); 3133 break; 3134 3135 case Bytecodes::_fast_zputfield: 3136 __ andi(R17_tos, R17_tos, 0x1); // boolean is true if LSB is 1 3137 // fall through to bputfield 3138 case Bytecodes::_fast_bputfield: 3139 __ stbx(R17_tos, Rclass_or_obj, Roffset); 3140 break; 3141 3142 case Bytecodes::_fast_cputfield: 3143 case Bytecodes::_fast_sputfield: 3144 __ sthx(R17_tos, Rclass_or_obj, Roffset); 3145 break; 3146 3147 case Bytecodes::_fast_fputfield: 3148 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 3149 break; 3150 3151 case Bytecodes::_fast_dputfield: 3152 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 3153 break; 3154 3155 default: ShouldNotReachHere(); 3156 } 3157 3158 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3159 Label LVolatile; 3160 __ beq(CR_is_vol, LVolatile); 3161 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3162 3163 __ align(32, 12); 3164 __ bind(LVolatile); 3165 __ fence(); 3166 } 3167 } 3168 3169 void TemplateTable::fast_accessfield(TosState state) { 3170 transition(atos, state); 3171 3172 Label LisVolatile; 3173 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3174 3175 const Register Rcache = R3_ARG1, 3176 Rclass_or_obj = R17_tos, 3177 Roffset = R22_tmp2, 3178 Rflags = R23_tmp3, 3179 Rscratch = R12_scratch2; 3180 3181 // Constant pool already resolved. Get the field offset. 3182 __ get_cache_and_index_at_bcp(Rcache, 1); 3183 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3184 3185 // JVMTI support 3186 jvmti_post_field_access(Rcache, Rscratch, false, true); 3187 3188 // Get the load address. 3189 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3190 3191 // Get volatile flag. 3192 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 
3193 __ bne(CCR0, LisVolatile); 3194 3195 switch(bytecode()) { 3196 case Bytecodes::_fast_agetfield: 3197 { 3198 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3199 __ verify_oop(R17_tos); 3200 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3201 3202 __ bind(LisVolatile); 3203 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3204 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3205 __ verify_oop(R17_tos); 3206 __ twi_0(R17_tos); 3207 __ isync(); 3208 break; 3209 } 3210 case Bytecodes::_fast_igetfield: 3211 { 3212 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3213 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3214 3215 __ bind(LisVolatile); 3216 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3217 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3218 __ twi_0(R17_tos); 3219 __ isync(); 3220 break; 3221 } 3222 case Bytecodes::_fast_lgetfield: 3223 { 3224 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3225 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3226 3227 __ bind(LisVolatile); 3228 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3229 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3230 __ twi_0(R17_tos); 3231 __ isync(); 3232 break; 3233 } 3234 case Bytecodes::_fast_bgetfield: 3235 { 3236 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3237 __ extsb(R17_tos, R17_tos); 3238 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3239 3240 __ bind(LisVolatile); 3241 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3242 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3243 __ twi_0(R17_tos); 3244 __ extsb(R17_tos, R17_tos); 3245 __ isync(); 3246 break; 3247 } 3248 case Bytecodes::_fast_cgetfield: 3249 { 3250 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3251 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3252 3253 __ bind(LisVolatile); 3254 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3255 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3256 __ twi_0(R17_tos); 3257 __ isync(); 3258 break; 3259 } 3260 case Bytecodes::_fast_sgetfield: 3261 { 3262 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3263 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3264 3265 __ bind(LisVolatile); 3266 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3267 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3268 __ twi_0(R17_tos); 3269 __ isync(); 3270 break; 3271 } 3272 case Bytecodes::_fast_fgetfield: 3273 { 3274 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3275 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3276 3277 __ bind(LisVolatile); 3278 Label Ldummy; 3279 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3280 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3281 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3282 __ bne_predict_not_taken(CCR0, Ldummy); 3283 __ bind(Ldummy); 3284 __ isync(); 3285 break; 3286 } 3287 case Bytecodes::_fast_dgetfield: 3288 { 3289 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3290 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3291 3292 __ bind(LisVolatile); 3293 Label Ldummy; 3294 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3295 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3296 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 
3297 __ bne_predict_not_taken(CCR0, Ldummy); 3298 __ bind(Ldummy); 3299 __ isync(); 3300 break; 3301 } 3302 default: ShouldNotReachHere(); 3303 } 3304 } 3305 3306 void TemplateTable::fast_xaccess(TosState state) { 3307 transition(vtos, state); 3308 3309 Label LisVolatile; 3310 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3311 const Register Rcache = R3_ARG1, 3312 Rclass_or_obj = R17_tos, 3313 Roffset = R22_tmp2, 3314 Rflags = R23_tmp3, 3315 Rscratch = R12_scratch2; 3316 3317 __ ld(Rclass_or_obj, 0, R18_locals); 3318 3319 // Constant pool already resolved. Get the field offset. 3320 __ get_cache_and_index_at_bcp(Rcache, 2); 3321 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3322 3323 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches. 3324 3325 // Needed to report exception at the correct bcp. 3326 __ addi(R14_bcp, R14_bcp, 1); 3327 3328 // Get the load address. 3329 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3330 3331 // Get volatile flag. 3332 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3333 __ bne(CCR0, LisVolatile); 3334 3335 switch(state) { 3336 case atos: 3337 { 3338 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3339 __ verify_oop(R17_tos); 3340 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3341 3342 __ bind(LisVolatile); 3343 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3344 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3345 __ verify_oop(R17_tos); 3346 __ twi_0(R17_tos); 3347 __ isync(); 3348 break; 3349 } 3350 case itos: 3351 { 3352 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3353 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3354 3355 __ bind(LisVolatile); 3356 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3357 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3358 __ twi_0(R17_tos); 3359 __ isync(); 3360 break; 3361 } 3362 case ftos: 3363 { 3364 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3365 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3366 3367 __ bind(LisVolatile); 3368 Label Ldummy; 3369 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3370 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3371 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3372 __ bne_predict_not_taken(CCR0, Ldummy); 3373 __ bind(Ldummy); 3374 __ isync(); 3375 break; 3376 } 3377 default: ShouldNotReachHere(); 3378 } 3379 __ addi(R14_bcp, R14_bcp, -1); 3380 } 3381 3382 // ============================================================================ 3383 // Calls 3384 3385 // Common code for invoke 3386 // 3387 // Input: 3388 // - byte_no 3389 // 3390 // Output: 3391 // - Rmethod: The method to invoke next. 3392 // - Rret_addr: The return address to return to. 3393 // - Rindex: MethodType (invokehandle) or CallSite obj (invokedynamic) 3394 // - Rrecv: Cache for "this" pointer, might be noreg if static call. 3395 // - Rflags: Method flags from const pool cache. 3396 // 3397 // Kills: 3398 // - Rscratch1 3399 // 3400 void TemplateTable::prepare_invoke(int byte_no, 3401 Register Rmethod, // linked method (or i-klass) 3402 Register Rret_addr,// return address 3403 Register Rindex, // itable index, MethodType, etc. 3404 Register Rrecv, // If caller wants to see it. 3405 Register Rflags, // If caller wants to test it. 
3406 Register Rscratch 3407 ) { 3408 // Determine flags. 3409 const Bytecodes::Code code = bytecode(); 3410 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 3411 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 3412 const bool is_invokehandle = code == Bytecodes::_invokehandle; 3413 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 3414 const bool is_invokespecial = code == Bytecodes::_invokespecial; 3415 const bool load_receiver = (Rrecv != noreg); 3416 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 3417 3418 assert_different_registers(Rmethod, Rindex, Rflags, Rscratch); 3419 assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch); 3420 assert_different_registers(Rret_addr, Rscratch); 3421 3422 load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic); 3423 3424 // Saving of SP done in call_from_interpreter. 3425 3426 // Maybe push "appendix" to arguments. 3427 if (is_invokedynamic || is_invokehandle) { 3428 Label Ldone; 3429 __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63); 3430 __ beq(CCR0, Ldone); 3431 // Push "appendix" (MethodType, CallSite, etc.). 3432 // This must be done before we get the receiver, 3433 // since the parameter_size includes it. 3434 __ load_resolved_reference_at_index(Rscratch, Rindex); 3435 __ verify_oop(Rscratch); 3436 __ push_ptr(Rscratch); 3437 __ bind(Ldone); 3438 } 3439 3440 // Load receiver if needed (after appendix is pushed so parameter size is correct). 3441 if (load_receiver) { 3442 const Register Rparam_count = Rscratch; 3443 __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask); 3444 __ load_receiver(Rparam_count, Rrecv); 3445 __ verify_oop(Rrecv); 3446 } 3447 3448 // Get return address. 3449 { 3450 Register Rtable_addr = Rscratch; 3451 Register Rret_type = Rret_addr; 3452 address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); 3453 3454 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3455 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3456 __ load_dispatch_table(Rtable_addr, (address*)table_addr); 3457 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3458 // Get return address. 3459 __ ldx(Rret_addr, Rtable_addr, Rret_type); 3460 } 3461 } 3462 3463 // Helper for virtual calls. Load target out of vtable and jump off! 3464 // Kills all passed registers. 3465 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) { 3466 3467 assert_different_registers(Rrecv_klass, Rtemp, Rret); 3468 const Register Rtarget_method = Rindex; 3469 3470 // Get target method & entry point. 3471 const int base = in_bytes(Klass::vtable_start_offset()); 3472 // Calc vtable addr: scale the vtable index by vtableEntry::size_in_bytes() (8). 3473 __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size_in_bytes())); 3474 // Load target. 3475 __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes()); 3476 __ ldx(Rtarget_method, Rindex, Rrecv_klass); 3477 // Argument and return type profiling. 3478 __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true); 3479 __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */); 3480 } 3481 3482 // Virtual or final call.
Final calls are rewritten on the fly to run through "fast_finalcall" next time. 3483 void TemplateTable::invokevirtual(int byte_no) { 3484 transition(vtos, vtos); 3485 3486 Register Rtable_addr = R11_scratch1, 3487 Rret_type = R12_scratch2, 3488 Rret_addr = R5_ARG3, 3489 Rflags = R22_tmp2, // Should survive C call. 3490 Rrecv = R3_ARG1, 3491 Rrecv_klass = Rrecv, 3492 Rvtableindex_or_method = R31, // Should survive C call. 3493 Rnum_params = R4_ARG2, 3494 Rnew_bc = R6_ARG4; 3495 3496 Label LnotFinal; 3497 3498 load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false); 3499 3500 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift); 3501 __ bfalse(CCR0, LnotFinal); 3502 3503 if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) { 3504 patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2); 3505 } 3506 invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2); 3507 3508 __ align(32, 12); 3509 __ bind(LnotFinal); 3510 // Load "this" pointer (receiver). 3511 __ rldicl(Rnum_params, Rflags, 64, 48); 3512 __ load_receiver(Rnum_params, Rrecv); 3513 __ verify_oop(Rrecv); 3514 3515 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3516 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3517 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table()); 3518 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3519 __ ldx(Rret_addr, Rret_type, Rtable_addr); 3520 __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1); 3521 __ load_klass(Rrecv_klass, Rrecv); 3522 __ verify_klass_ptr(Rrecv_klass); 3523 __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false); 3524 3525 generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1); 3526 } 3527 3528 void TemplateTable::fast_invokevfinal(int byte_no) { 3529 transition(vtos, vtos); 3530 3531 assert(byte_no == f2_byte, "use this argument"); 3532 Register Rflags = R22_tmp2, 3533 Rmethod = R31; 3534 load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false); 3535 invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2); 3536 } 3537 3538 void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) { 3539 3540 assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2); 3541 3542 // Load receiver from stack slot. 3543 Register Rrecv = Rscratch2; 3544 Register Rnum_params = Rrecv; 3545 3546 __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod); 3547 __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params); 3548 3549 // Get return address. 3550 Register Rtable_addr = Rscratch1, 3551 Rret_addr = Rflags, 3552 Rret_type = Rret_addr; 3553 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3554 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3555 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table()); 3556 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3557 __ ldx(Rret_addr, Rret_type, Rtable_addr); 3558 3559 // Load receiver and receiver NULL check. 
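// (load_receiver indexes the expression stack by the parameter count; roughly:
//    recv = *(R15_esp + Rnum_params * Interpreter::stackElementSize);
// Illustrative pseudocode only - see InterpreterMacroAssembler::load_receiver
// for the actual code.)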
3560 __ load_receiver(Rnum_params, Rrecv); 3561 __ null_check_throw(Rrecv, -1, Rscratch1); 3562 3563 __ profile_final_call(Rrecv, Rscratch1); 3564 // Argument and return type profiling. 3565 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true); 3566 3567 // Do the call. 3568 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2); 3569 } 3570 3571 void TemplateTable::invokespecial(int byte_no) { 3572 assert(byte_no == f1_byte, "use this argument"); 3573 transition(vtos, vtos); 3574 3575 Register Rtable_addr = R3_ARG1, 3576 Rret_addr = R4_ARG2, 3577 Rflags = R5_ARG3, 3578 Rreceiver = R6_ARG4, 3579 Rmethod = R31; 3580 3581 prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1); 3582 3583 // Receiver NULL check. 3584 __ null_check_throw(Rreceiver, -1, R11_scratch1); 3585 3586 __ profile_call(R11_scratch1, R12_scratch2); 3587 // Argument and return type profiling. 3588 __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false); 3589 __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2); 3590 } 3591 3592 void TemplateTable::invokestatic(int byte_no) { 3593 assert(byte_no == f1_byte, "use this argument"); 3594 transition(vtos, vtos); 3595 3596 Register Rtable_addr = R3_ARG1, 3597 Rret_addr = R4_ARG2, 3598 Rflags = R5_ARG3; 3599 3600 prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1); 3601 3602 __ profile_call(R11_scratch1, R12_scratch2); 3603 // Argument and return type profiling. 3604 __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false); 3605 __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2); 3606 } 3607 3608 void TemplateTable::invokeinterface_object_method(Register Rrecv_klass, 3609 Register Rret, 3610 Register Rflags, 3611 Register Rmethod, 3612 Register Rtemp1, 3613 Register Rtemp2) { 3614 3615 assert_different_registers(Rmethod, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2); 3616 Label LnotFinal; 3617 3618 // Check for vfinal. 3619 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift); 3620 __ bfalse(CCR0, LnotFinal); 3621 3622 Register Rscratch = Rflags; // Rflags is dead now. 3623 3624 // Final call case. 3625 __ profile_final_call(Rtemp1, Rscratch); 3626 // Argument and return type profiling. 3627 __ profile_arguments_type(Rmethod, Rscratch, Rrecv_klass /* scratch */, true); 3628 // Do the final call - the index (f2) contains the method. 3629 __ call_from_interpreter(Rmethod, Rret, Rscratch, Rrecv_klass /* scratch */); 3630 3631 // Non-final call case. 3632 __ bind(LnotFinal); 3633 __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false); 3634 generate_vtable_call(Rrecv_klass, Rmethod, Rret, Rscratch); 3635 } 3636 3637 void TemplateTable::invokeinterface(int byte_no) { 3638 assert(byte_no == f1_byte, "use this argument"); 3639 transition(vtos, vtos); 3640 3641 const Register Rscratch1 = R11_scratch1, 3642 Rscratch2 = R12_scratch2, 3643 Rmethod = R6_ARG4, 3644 Rmethod2 = R9_ARG7, 3645 Rinterface_klass = R5_ARG3, 3646 Rret_addr = R8_ARG6, 3647 Rindex = R10_ARG8, 3648 Rreceiver = R3_ARG1, 3649 Rrecv_klass = R4_ARG2, 3650 Rflags = R7_ARG5; 3651 3652 prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rmethod, Rreceiver, Rflags, Rscratch1); 3653 3654 // Get receiver klass. 3655 __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch2); 3656 __ load_klass(Rrecv_klass, Rreceiver); 3657 3658 // Check corner case object method.
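// Methods declared in java.lang.Object (hashCode, toString, etc.) may legally be
// invoked via invokeinterface. The linker marks such cache entries with
// is_forced_virtual, so they are dispatched like invokevirtual below.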
3659 Label LobjectMethod, L_no_such_interface, Lthrow_ame; 3660 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift); 3661 __ btrue(CCR0, LobjectMethod); 3662 3663 __ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2, 3664 L_no_such_interface, /*return_method=*/false); 3665 3666 __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false); 3667 3668 // Find entry point to call. 3669 3670 // Get declaring interface class from method 3671 __ ld(Rinterface_klass, in_bytes(Method::const_offset()), Rmethod); 3672 __ ld(Rinterface_klass, in_bytes(ConstMethod::constants_offset()), Rinterface_klass); 3673 __ ld(Rinterface_klass, ConstantPool::pool_holder_offset_in_bytes(), Rinterface_klass); 3674 3675 // Get itable index from method 3676 __ lwa(Rindex, in_bytes(Method::itable_index_offset()), Rmethod); 3677 __ subfic(Rindex, Rindex, Method::itable_index_max); 3678 3679 __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rmethod2, Rscratch1, Rscratch2, 3680 L_no_such_interface); 3681 3682 __ cmpdi(CCR0, Rmethod2, 0); 3683 __ beq(CCR0, Lthrow_ame); 3684 // Found entry. Jump off! 3685 // Argument and return type profiling. 3686 __ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true); 3687 //__ profile_called_method(Rindex, Rscratch1); 3688 __ call_from_interpreter(Rmethod2, Rret_addr, Rscratch1, Rscratch2); 3689 3690 // Vtable entry was NULL => Throw abstract method error. 3691 __ bind(Lthrow_ame); 3692 // Pass arguments for generating a verbose error message. 3693 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), 3694 Rrecv_klass, Rmethod); 3695 3696 // Interface was not found => Throw incompatible class change error. 3697 __ bind(L_no_such_interface); 3698 // Pass arguments for generating a verbose error message. 3699 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), 3700 Rrecv_klass, Rinterface_klass); 3701 DEBUG_ONLY( __ should_not_reach_here(); ) 3702 3703 // Special case of invokeinterface called for virtual method of 3704 // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details: 3705 // The invokeinterface was rewritten to an invokevirtual, hence we have 3706 // to handle this corner case. This code isn't produced by javac, but could 3707 // be produced by another compliant java compiler. 3708 __ bind(LobjectMethod); 3709 invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rmethod, Rscratch1, Rscratch2); 3710 } 3711 3712 void TemplateTable::invokedynamic(int byte_no) { 3713 transition(vtos, vtos); 3714 3715 const Register Rret_addr = R3_ARG1, 3716 Rflags = R4_ARG2, 3717 Rmethod = R22_tmp2, 3718 Rscratch1 = R11_scratch1, 3719 Rscratch2 = R12_scratch2; 3720 3721 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2); 3722 3723 // Profile this call. 3724 __ profile_call(Rscratch1, Rscratch2); 3725 3726 // Off we go. With the new method handles, we don't jump to a method handle 3727 // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens 3728 // to be the callsite object the bootstrap method returned. This is passed to a 3729 // "link" method which does the dispatch (Most likely just grabs the MH stored 3730 // inside the callsite and does an invokehandle). 3731 // Argument and return type profiling.
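// (profile_arguments_type records the observed argument and return types in the
// MethodData, so the JIT compilers can later specialize this call site.)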
3732 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false); 3733 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */); 3734 } 3735 3736 void TemplateTable::invokehandle(int byte_no) { 3737 transition(vtos, vtos); 3738 3739 const Register Rret_addr = R3_ARG1, 3740 Rflags = R4_ARG2, 3741 Rrecv = R5_ARG3, 3742 Rmethod = R22_tmp2, 3743 Rscratch1 = R11_scratch1, 3744 Rscratch2 = R12_scratch2; 3745 3746 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2); 3747 __ verify_method_ptr(Rmethod); 3748 __ null_check_throw(Rrecv, -1, Rscratch2); 3749 3750 __ profile_final_call(Rrecv, Rscratch1); 3751 3752 // Still no call from handle => We call the method handle interpreter here. 3753 // Argument and return type profiling. 3754 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true); 3755 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */); 3756 } 3757 3758 // ============================================================================= 3759 // Allocation 3760 3761 // Puts allocated obj ref onto the expression stack. 3762 void TemplateTable::_new() { 3763 transition(vtos, atos); 3764 3765 Label Lslow_case, 3766 Ldone; 3767 3768 const Register RallocatedObject = R17_tos, 3769 RinstanceKlass = R9_ARG7, 3770 Rscratch = R11_scratch1, 3771 Roffset = R8_ARG6, 3772 Rinstance_size = Roffset, 3773 Rcpool = R4_ARG2, 3774 Rtags = R3_ARG1, 3775 Rindex = R5_ARG3; 3776 3777 // -------------------------------------------------------------------------- 3778 // Check if fast case is possible. 3779 3780 // Load pointers to const pool and const pool's tags array. 3781 __ get_cpool_and_tags(Rcpool, Rtags); 3782 // Load index of constant pool entry. 3783 __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned); 3784 3785 // Note: compared to other architectures, PPC's implementation always goes 3786 // to the slow path if TLAB is used and fails. 3787 if (UseTLAB) { 3788 // Make sure the class we're about to instantiate has been resolved 3789 // This is done before loading instanceKlass to be consistent with the order 3790 // how Constant Pool is updated (see ConstantPoolCache::klass_at_put). 3791 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes()); 3792 __ lbzx(Rtags, Rindex, Rtags); 3793 3794 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class); 3795 __ bne(CCR0, Lslow_case); 3796 3797 // Get instanceKlass 3798 __ sldi(Roffset, Rindex, LogBytesPerWord); 3799 __ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass); 3800 3801 // Make sure klass is fully initialized and get instance_size. 3802 __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass); 3803 __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass); 3804 3805 __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized); 3806 // Make sure klass does not have has_finalizer, or is abstract, or interface or java/lang/Class. 3807 __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0? 3808 3809 __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized? 3810 __ beq(CCR0, Lslow_case); 3811 3812 // -------------------------------------------------------------------------- 3813 // Fast case: 3814 // Allocate the instance. 3815 // 1) Try to allocate in the TLAB. 3816 // 2) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.). 
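// The TLAB fast path below is plain bump-the-pointer allocation. Since a TLAB
// is thread-local, no atomic update of the top pointer is needed. A minimal
// sketch of the logic emitted below (illustrative pseudocode only):
//   new_top = tlab_top + instance_size;
//   if (new_top > tlab_end) goto Lslow_case;
//   tlab_top = new_top;
//   obj      = old_top;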
3817 3818 Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits. 3819 Register RnewTopValue = R6_ARG4; 3820 Register RendValue = R7_ARG5; 3821 3822 // Check if we can allocate in the TLAB. 3823 __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread); 3824 __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread); 3825 3826 __ add(RnewTopValue, Rinstance_size, RoldTopValue); 3827 3828 // If there is enough space, we do not CAS and do not clear. 3829 __ cmpld(CCR0, RnewTopValue, RendValue); 3830 __ bgt(CCR0, Lslow_case); 3831 3832 __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread); 3833 3834 if (!ZeroTLAB) { 3835 // -------------------------------------------------------------------------- 3836 // Init1: Zero out newly allocated memory. 3837 // Initialize remaining object fields. 3838 Register Rbase = Rtags; 3839 __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc)); 3840 __ addi(Rbase, RallocatedObject, sizeof(oopDesc)); 3841 __ srdi(Rinstance_size, Rinstance_size, 3); 3842 3843 // Clear out object skipping header. Also takes care of the zero-length case. 3844 __ clear_memory_doubleword(Rbase, Rinstance_size); 3845 } 3846 3847 // -------------------------------------------------------------------------- 3848 // Init2: Initialize the header: mark, klass 3849 // Init mark. 3850 if (UseBiasedLocking) { 3851 __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass); 3852 } else { 3853 __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0); 3854 } 3855 __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject); 3856 3857 // Init klass. 3858 __ store_klass_gap(RallocatedObject); 3859 __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms) 3860 3861 // Check and trigger dtrace event. 3862 SkipIfEqualZero::skip_to_label_if_equal_zero(_masm, Rscratch, &DTraceAllocProbes, Ldone); 3863 __ push(atos); 3864 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)); 3865 __ pop(atos); 3866 3867 __ b(Ldone); 3868 } 3869 3870 // -------------------------------------------------------------------------- 3871 // slow case 3872 __ bind(Lslow_case); 3873 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex); 3874 3875 // continue 3876 __ bind(Ldone); 3877 3878 // Must prevent reordering of stores for object initialization with stores that publish the new object. 3879 __ membar(Assembler::StoreStore); 3880 } 3881 3882 void TemplateTable::newarray() { 3883 transition(itos, atos); 3884 3885 __ lbz(R4, 1, R14_bcp); 3886 __ extsw(R5, R17_tos); 3887 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */); 3888 3889 // Must prevent reordering of stores for object initialization with stores that publish the new object. 3890 __ membar(Assembler::StoreStore); 3891 } 3892 3893 void TemplateTable::anewarray() { 3894 transition(itos, atos); 3895 3896 __ get_constant_pool(R4); 3897 __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned); 3898 __ extsw(R6, R17_tos); // size 3899 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */); 3900 3901 // Must prevent reordering of stores for object initialization with stores that publish the new object.
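// (A StoreStore barrier suffices here: it keeps the initializing stores for the
// array header and body ahead of any later store that publishes the reference.)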
3902 __ membar(Assembler::StoreStore); 3903 } 3904 3905 // Allocate a multi-dimensional array 3906 void TemplateTable::multianewarray() { 3907 transition(vtos, atos); 3908 3909 Register Rptr = R31; // Needs to survive C call. 3910 3911 // Put ndims * wordSize into Rptr. 3912 __ lbz(Rptr, 3, R14_bcp); 3913 __ sldi(Rptr, Rptr, Interpreter::logStackElementSize); 3914 // Esp points past last_dim, so set R4 to first_dim address. 3915 __ add(R4, Rptr, R15_esp); 3916 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */); 3917 // Pop all dimensions off the stack. 3918 __ add(R15_esp, Rptr, R15_esp); 3919 3920 // Must prevent reordering of stores for object initialization with stores that publish the new object. 3921 __ membar(Assembler::StoreStore); 3922 } 3923 3924 void TemplateTable::arraylength() { 3925 transition(atos, itos); 3926 3927 3928 __ verify_oop(R17_tos); 3929 __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1); 3930 __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos); 3931 } 3932 3933 // ============================================================================ 3934 // Typechecks 3935 3936 void TemplateTable::checkcast() { 3937 transition(atos, atos); 3938 3939 Label Ldone, Lis_null, Lquicked, Lresolved; 3940 Register Roffset = R6_ARG4, 3941 RobjKlass = R4_ARG2, 3942 RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register. 3943 Rcpool = R11_scratch1, 3944 Rtags = R12_scratch2; 3945 3946 // Null does not pass. 3947 __ cmpdi(CCR0, R17_tos, 0); 3948 __ beq(CCR0, Lis_null); 3949 3950 // Get constant pool tag to find out if the bytecode has already been "quickened". 3951 __ get_cpool_and_tags(Rcpool, Rtags); 3952 3953 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned); 3954 3955 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes()); 3956 __ lbzx(Rtags, Rtags, Roffset); 3957 3958 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class); 3959 __ beq(CCR0, Lquicked); 3960 3961 // Call into the VM to "quicken" instanceof. 3962 __ push_ptr(); // for GC 3963 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 3964 __ get_vm_result_2(RspecifiedKlass); 3965 __ pop_ptr(); // Restore receiver. 3966 __ b(Lresolved); 3967 3968 // Extract target class from constant pool. 3969 __ bind(Lquicked); 3970 __ sldi(Roffset, Roffset, LogBytesPerWord); 3971 __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass); 3972 3973 // Do the checkcast. 3974 __ bind(Lresolved); 3975 // Get value klass in RobjKlass. 3976 __ load_klass(RobjKlass, R17_tos); 3977 // Generate a fast subtype check. Branch to Ldone if the check succeeds; fall through and throw if it fails. 3978 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone); 3979 3980 // Not a subtype; must throw an exception. 3981 // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention. 3982 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry); 3983 __ mtctr(R11_scratch1); 3984 __ bctr(); 3985 3986 // Profile the null case. 3987 __ align(32, 12); 3988 __ bind(Lis_null); 3989 __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch. 3990 3991 __ align(32, 12); 3992 __ bind(Ldone); 3993 } 3994 3995 // Output: 3996 // - tos == 0: Obj was null or not an instance of class. 3997 // - tos == 1: Obj was an instance of class.
3998 void TemplateTable::instanceof() { 3999 transition(atos, itos); 4000 4001 Label Ldone, Lis_null, Lquicked, Lresolved; 4002 Register Roffset = R6_ARG4, 4003 RobjKlass = R4_ARG2, 4004 RspecifiedKlass = R5_ARG3, 4005 Rcpool = R11_scratch1, 4006 Rtags = R12_scratch2; 4007 4008 // Null does not pass. 4009 __ cmpdi(CCR0, R17_tos, 0); 4010 __ beq(CCR0, Lis_null); 4011 4012 // Get constant pool tag to find out if the bytecode has already been "quickened". 4013 __ get_cpool_and_tags(Rcpool, Rtags); 4014 4015 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned); 4016 4017 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes()); 4018 __ lbzx(Rtags, Rtags, Roffset); 4019 4020 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class); 4021 __ beq(CCR0, Lquicked); 4022 4023 // Call into the VM to "quicken" instanceof. 4024 __ push_ptr(); // for GC 4025 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 4026 __ get_vm_result_2(RspecifiedKlass); 4027 __ pop_ptr(); // Restore receiver. 4028 __ b(Lresolved); 4029 4030 // Extract target class from constant pool. 4031 __ bind(Lquicked); 4032 __ sldi(Roffset, Roffset, LogBytesPerWord); 4033 __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass); 4034 4035 // Do the instanceof check. 4036 __ bind(Lresolved); 4037 // Get value klass in RobjKlass. 4038 __ load_klass(RobjKlass, R17_tos); 4039 // Generate a fast subtype check. Branch to Ldone with tos == 1 if it succeeds; otherwise fall through and set tos to 0. 4040 __ li(R17_tos, 1); 4041 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone); 4042 __ li(R17_tos, 0); 4043 4044 if (ProfileInterpreter) { 4045 __ b(Ldone); 4046 } 4047 4048 // Profile the null case. 4049 __ align(32, 12); 4050 __ bind(Lis_null); 4051 __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch. 4052 4053 __ align(32, 12); 4054 __ bind(Ldone); 4055 } 4056 4057 // ============================================================================= 4058 // Breakpoints 4059 4060 void TemplateTable::_breakpoint() { 4061 transition(vtos, vtos); 4062 4063 // Get the unpatched byte code. 4064 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp); 4065 __ mr(R31, R3_RET); 4066 4067 // Post the breakpoint event. 4068 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp); 4069 4070 // Complete the execution of original bytecode. 4071 __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos)); 4072 } 4073 4074 // ============================================================================= 4075 // Exceptions 4076 4077 void TemplateTable::athrow() { 4078 transition(atos, vtos); 4079 4080 // Exception oop is in tos 4081 __ verify_oop(R17_tos); 4082 4083 __ null_check_throw(R17_tos, -1, R11_scratch1); 4084 4085 // Throw exception interpreter entry expects exception oop to be in R3. 4086 __ mr(R3_RET, R17_tos); 4087 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry()); 4088 __ mtctr(R11_scratch1); 4089 __ bctr(); 4090 } 4091 4092 // ============================================================================= 4093 // Synchronization 4094 // Searches the basic object lock list on the stack for a free slot 4095 // and uses it to lock the object in tos. 4096 // 4097 // Recursive locking is enabled by exiting the search if the same 4098 // object is already found in the list.
Thus, a new BasicObjectLock 4099 // is allocated "higher up" in the stack and thus is found first 4100 // at next monitor exit. 4101 void TemplateTable::monitorenter() { 4102 transition(atos, vtos); 4103 4104 __ verify_oop(R17_tos); 4105 4106 Register Rcurrent_monitor = R11_scratch1, 4107 Rcurrent_obj = R12_scratch2, 4108 Robj_to_lock = R17_tos, 4109 Rscratch1 = R3_ARG1, 4110 Rscratch2 = R4_ARG2, 4111 Rscratch3 = R5_ARG3, 4112 Rcurrent_obj_addr = R6_ARG4; 4113 4114 // ------------------------------------------------------------------------------ 4115 // Null pointer exception. 4116 __ null_check_throw(Robj_to_lock, -1, R11_scratch1); 4117 4118 // Try to acquire a lock on the object. 4119 // Repeat until succeeded (i.e., until monitorenter returns true). 4120 4121 // ------------------------------------------------------------------------------ 4122 // Find a free slot in the monitor block. 4123 Label Lfound, Lexit, Lallocate_new; 4124 ConditionRegister found_free_slot = CCR0, 4125 found_same_obj = CCR1, 4126 reached_limit = CCR6; 4127 { 4128 Label Lloop, Lentry; 4129 Register Rlimit = Rcurrent_monitor; 4130 4131 // Set up search loop - start with topmost monitor. 4132 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes()); 4133 4134 __ ld(Rlimit, 0, R1_SP); 4135 __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base 4136 4137 // Check if any slot is present => shortcut to allocation if not. 4138 __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit); 4139 __ bgt(reached_limit, Lallocate_new); 4140 4141 // Pre-load topmost slot. 4142 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr); 4143 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize); 4144 // The search loop. 4145 __ bind(Lloop); 4146 // Found free slot? 4147 __ cmpdi(found_free_slot, Rcurrent_obj, 0); 4148 // Is this entry for same obj? If so, stop the search and take the found 4149 // free slot or allocate a new one to enable recursive locking. 4150 __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock); 4151 __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit); 4152 __ beq(found_free_slot, Lexit); 4153 __ beq(found_same_obj, Lallocate_new); 4154 __ bgt(reached_limit, Lallocate_new); 4155 // Check if last allocated BasicObjectLock reached. 4156 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr); 4157 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize); 4158 // Next iteration if unchecked BasicObjectLocks exist on the stack. 4159 __ b(Lloop); 4160 } 4161 4162 // ------------------------------------------------------------------------------ 4163 // Check if we found a free slot. 4164 __ bind(Lexit); 4165 4166 __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes()); 4167 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize); 4168 __ b(Lfound); 4169 4170 // We didn't find a free BasicObjectLock => allocate one. 4171 __ align(32, 12); 4172 __ bind(Lallocate_new); 4173 __ add_monitor_to_stack(false, Rscratch1, Rscratch2); 4174 __ mr(Rcurrent_monitor, R26_monitor); 4175 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes()); 4176 4177 // ------------------------------------------------------------------------------ 4178 // We now have a slot to lock.
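// At this point Rcurrent_monitor points to the BasicObjectLock to use and
// Rcurrent_obj_addr to its obj field, which is filled in just below.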
4179 __ bind(Lfound); 4180 4181 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly. 4182 // The object has already been popped from the stack, so the expression stack looks correct. 4183 __ addi(R14_bcp, R14_bcp, 1); 4184 4185 __ std(Robj_to_lock, 0, Rcurrent_obj_addr); 4186 __ lock_object(Rcurrent_monitor, Robj_to_lock); 4187 4188 // Check if there's enough space on the stack for the monitors after locking. 4189 // This emits a single store. 4190 __ generate_stack_overflow_check(0); 4191 4192 // The bcp has already been incremented. Just need to dispatch to next instruction. 4193 __ dispatch_next(vtos); 4194 } 4195 4196 void TemplateTable::monitorexit() { 4197 transition(atos, vtos); 4198 __ verify_oop(R17_tos); 4199 4200 Register Rcurrent_monitor = R11_scratch1, 4201 Rcurrent_obj = R12_scratch2, 4202 Robj_to_lock = R17_tos, 4203 Rcurrent_obj_addr = R3_ARG1, 4204 Rlimit = R4_ARG2; 4205 Label Lfound, Lillegal_monitor_state; 4206 4207 // Check corner case: unbalanced monitorEnter / Exit. 4208 __ ld(Rlimit, 0, R1_SP); 4209 __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base 4210 4211 // Null pointer check. 4212 __ null_check_throw(Robj_to_lock, -1, R11_scratch1); 4213 4214 __ cmpld(CCR0, R26_monitor, Rlimit); 4215 __ bgt(CCR0, Lillegal_monitor_state); 4216 4217 // Find the corresponding slot in the monitors stack section. 4218 { 4219 Label Lloop; 4220 4221 // Start with topmost monitor. 4222 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes()); 4223 __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes()); 4224 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr); 4225 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize); 4226 4227 __ bind(Lloop); 4228 // Is this entry for same obj? 4229 __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock); 4230 __ beq(CCR0, Lfound); 4231 4232 // Check if last allocated BasicObjectLock reached. 4233 4234 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr); 4235 __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit); 4236 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize); 4237 4238 // Next iteration if unchecked BasicObjectLocks exist on the stack. 4239 __ ble(CCR0, Lloop); 4240 } 4241 4242 // Fell through without finding the BasicObjectLock => throw up! 4243 __ bind(Lillegal_monitor_state); 4244 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); 4245 __ should_not_reach_here(); 4246 4247 __ align(32, 12); 4248 __ bind(Lfound); 4249 __ addi(Rcurrent_monitor, Rcurrent_obj_addr, 4250 -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes()); 4251 __ unlock_object(Rcurrent_monitor); 4252 } 4253 4254 // ============================================================================ 4255 // Wide bytecodes 4256 4257 // Wide instructions. Simply redirects to the wide entry point for that instruction. 4258 void TemplateTable::wide() { 4259 transition(vtos, vtos); 4260 4261 const Register Rtable = R11_scratch1, 4262 Rindex = R12_scratch2, 4263 Rtmp = R0; 4264 4265 __ lbz(Rindex, 1, R14_bcp); 4266 4267 __ load_dispatch_table(Rtable, Interpreter::_wentry_point); 4268 4269 __ slwi(Rindex, Rindex, LogBytesPerWord); 4270 __ ldx(Rtmp, Rtable, Rindex); 4271 __ mtctr(Rtmp); 4272 __ bctr(); 4273 // Note: the bcp increment step is part of the individual wide bytecode implementations. 4274 }
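// The cp-cache flag decoding used throughout this file (e.g. the rldicl and
// andi sequences above) is ordinary shift-and-mask arithmetic. A minimal
// standalone sketch, assuming illustrative shift/width constants (the real
// values live in ConstantPoolCacheEntry); deliberately kept out of the build:
#if 0
#include <cstdint>

// Hypothetical layout constants, for illustration only.
static const int      tos_state_shift     = 28;
static const int      tos_state_bits      = 4;
static const uint64_t parameter_size_mask = 0xff;

// Equivalent of: rldicl(Rret_type, Rflags, 64 - tos_state_shift, 64 - tos_state_bits)
// (rotate right by the shift amount, then keep the low tos_state_bits bits).
static inline uint64_t extract_tos_state(uint64_t flags) {
  return (flags >> tos_state_shift) & ((uint64_t(1) << tos_state_bits) - 1);
}

// Equivalent of: andi(Rparam_count, Rflags, parameter_size_mask)
static inline uint64_t extract_parameter_size(uint64_t flags) {
  return flags & parameter_size_mask;
}
#endif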