/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants is possible at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,   // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1BarrierSet:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
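          // Note: a null store needs no G1 post barrier here, since it cannot
          // create a cross-region reference; the SATB pre-barrier above has
          // already recorded the previous value, so we only write a (possibly
          // compressed) zero.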
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should remain uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
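      // (A zero put_code means the cpCache entry has not been resolved yet,
      // so the compare against zero below skips the patch until resolution
      // has happened.)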
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
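  // bipush: load the single operand byte that follows the opcode and
  // sign-extend it to an int (lbz zero-extends, hence the extsb below).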
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notFloat, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass);        // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ bne(CCR0, notFloat);
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);
  __ b(exit);

  __ align(32, 12);
  // assume the tag is for condy; if not, the VM runtime will tell us
  __ bind(notFloat);
  condy_helper(exit);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label is_null;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null);

  // Convert null sentinel to NULL.
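  // Note: the resolved-references array stores Universe::the_null_sentinel()
  // for entries whose resolved constant really is null (a plain null slot
  // still means "unresolved"), so the sentinel has to be converted back here.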
  int simm16_rest = __ load_const_optimized(Rscratch, Universe::the_null_sentinel_addr(), R0, true);
  __ ld(Rscratch, simm16_rest, Rscratch);
  __ cmpld(CCR0, R17_tos, Rscratch);
  if (VM_Version::has_isel()) {
    __ isel_0(R17_tos, CCR0, Assembler::equal);
  } else {
    Label not_sentinel;
    __ bne(CCR0, not_sentinel);
    __ li(R17_tos, 0);
    __ bind(not_sentinel);
  }
  __ verify_oop(R17_tos);
  __ dispatch_epilog(atos, Bytecodes::length_for(bytecode()));

  __ bind(is_null);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
  __ call_VM(R17_tos, entry, R3_ARG1);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label not_double, not_long, exit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);
  __ sldi(Rindex, Rindex, LogBytesPerWord);

  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, not_double);
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(exit);

  __ bind(not_double);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Long);
  __ bne(CCR0, not_long);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);
  __ b(exit);

  __ bind(not_long);
  condy_helper(exit);

  __ align(32, 12);
  __ bind(exit);
}

void TemplateTable::condy_helper(Label& Done) {
  const Register obj   = R31;
  const Register off   = R11_scratch1;
  const Register flags = R12_scratch2;
  const Register rarg  = R4_ARG2;
  __ li(rarg, (int)bytecode());
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
  __ get_vm_result_2(flags);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ andi(off, flags, ConstantPoolCacheEntry::field_index_mask);

  // What sort of thing are we loading?
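  // Extract the tos state: rotate the tos_state field down to bit 0 and
  // mask it to tos_state_bits, i.e. (flags >> tos_state_shift) & mask.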
  __ rldicl(flags, flags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

  switch (bytecode()) {
    case Bytecodes::_ldc:
    case Bytecodes::_ldc_w:
      {
        // tos in (itos, ftos, stos, btos, ctos, ztos)
        Label notInt, notFloat, notShort, notByte, notChar, notBool;
        __ cmplwi(CCR0, flags, itos);
        __ bne(CCR0, notInt);
        // itos
        __ lwax(R17_tos, obj, off);
        __ push(itos);
        __ b(Done);

        __ bind(notInt);
        __ cmplwi(CCR0, flags, ftos);
        __ bne(CCR0, notFloat);
        // ftos
        __ lfsx(F15_ftos, obj, off);
        __ push(ftos);
        __ b(Done);

        __ bind(notFloat);
        __ cmplwi(CCR0, flags, stos);
        __ bne(CCR0, notShort);
        // stos
        __ lhax(R17_tos, obj, off);
        __ push(stos);
        __ b(Done);

        __ bind(notShort);
        __ cmplwi(CCR0, flags, btos);
        __ bne(CCR0, notByte);
        // btos
        __ lbzx(R17_tos, obj, off);
        __ extsb(R17_tos, R17_tos);
        __ push(btos);
        __ b(Done);

        __ bind(notByte);
        __ cmplwi(CCR0, flags, ctos);
        __ bne(CCR0, notChar);
        // ctos
        __ lhzx(R17_tos, obj, off);
        __ push(ctos);
        __ b(Done);

        __ bind(notChar);
        __ cmplwi(CCR0, flags, ztos);
        __ bne(CCR0, notBool);
        // ztos
        __ lbzx(R17_tos, obj, off);
        __ push(ztos);
        __ b(Done);

        __ bind(notBool);
        break;
      }

    case Bytecodes::_ldc2_w:
      {
        Label notLong, notDouble;
        __ cmplwi(CCR0, flags, ltos);
        __ bne(CCR0, notLong);
        // ltos
        __ ldx(R17_tos, obj, off);
        __ push(ltos);
        __ b(Done);

        __ bind(notLong);
        __ cmplwi(CCR0, flags, dtos);
        __ bne(CCR0, notDouble);
        // dtos
        __ lfdx(F15_ftos, obj, off);
        __ push(dtos);
        __ b(Done);

        __ bind(notDouble);
        break;
      }

    default:
      ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
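    // Rewrite decision: next == iload      -> leave it for the later iload;
    //                   next == fast_iload -> fast_iload2;
    //                   next == caload     -> fast_icaload;
    //                   anything else      -> plain fast_iload.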
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable of type long from the locals area to the TOS cache register.
// The local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because Lbcp points to wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
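// fast_icaload fuses the pair: it reads the iload's local index directly
// from the bytecode stream (bcp + 1), loads the int local, and then does
// the caload, saving one bytecode dispatch.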
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs && rc == may_rewrite) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
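    // Rewrite decision: next == getfield       -> leave for the later rewrite;
    //                   next == fast_igetfield -> fast_iaccess_0;
    //                   next == fast_agetfield -> fast_aaccess_0;
    //                   next == fast_fgetfield -> fast_faccess_0;
    //                   anything else          -> fast_aload_0.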
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack and...
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31;    // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
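  // (index_check_without_pop left all three operands on the expression
  //  stack, so value, index and array were still in place across the
  //  checks above; pop them in one go now.)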
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  __ pop_ptr(Rarray);
  // tos: val

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(Rscratch, Rarray);
  __ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch);
  int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit());
  __ testbitdi(CCR0, R0, Rscratch, diffbit);
  Label L_skip;
  __ bfalse(CCR0, L_skip);
  __ andi(R17_tos, R17_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ index_check_without_pop(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp); // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp); // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize,     R15_esp); // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // tos      = number of bits to shift
  // Rscratch = value to shift
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:  __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide min_jlong/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
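  // Note: the long shifts mask the count to its low 6 bits (0..63), while
  // the int shifts in iop2 mask to 5 bits, as the JVM specification requires.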
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);           // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.
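  // load_local_int returns the value in Rvalue and leaves the local's
  // address in Rindex, so the incremented value can be stored straight
  // back without recomputing the address.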

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
      // fall through
    case Bytecodes::_l2d:
      __ move_l_to_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ move_l_to_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ move_l_to_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
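        // Call the shared runtime instead: converting via fcfid (to double)
        // and then frsp (to float) rounds twice, which for some long inputs
        // differs from a single correctly-rounded long->float conversion.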
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ move_d_to_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ move_d_to_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result == 1  => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in R17_tos.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos, 0, true);
    return;
  }

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  // Bump bytecode pointer by displacement (take the branch).
  __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    Label Lforward;

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        if (UseOnStackReplacement) {
          __ and_(Rscratch3, Rscratch2, Rscratch3);
          __ bne(CCR0, Lforward);
          __ b(Loverflow);
        } else {
          __ b(Lforward);
        }
      }

      // If there's no MDO, increment counter in method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters);
      if (UseOnStackReplacement) {
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
      } else {
        __ b(Lforward);
      }
      __ bind(Loverflow);

      // Notify point for loop, pass branch bytecode.
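      // R14_bcp was already advanced past the branch above, so subtracting
      // the displacement recovers the bcp of the branch bytecode itself,
      // which is what frequency_counter_overflow is given.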
      __ subf(R4_ARG2, Rdisp, R14_bcp); // Compute branch bytecode (previous bcp).
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

      // Was an OSR adapter generated?
      __ cmpdi(CCR0, R3_RET, 0);
      __ beq(CCR0, Lforward);

      // Has the nmethod been invalidated already?
      __ lbz(R0, nmethod::state_offset(), R3_RET);
      __ cmpwi(CCR0, R0, nmethod::in_use);
      __ bne(CCR0, Lforward);

      // Migrate the interpreter frame off of the stack.
      // We can use all registers because we will not return to interpreter from this point.

      // Save nmethod.
      const Register osr_nmethod = R31;
      __ mr(osr_nmethod, R3_RET);
      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
      __ reset_last_Java_frame();
      // OSR buffer is in ARG1.

      // Remove the interpreter frame.
      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

      // Jump to the osr code.
      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
      __ mtlr(R0);
      __ mtctr(R11_scratch1);
      __ bctr();

    } else {

      const Register invoke_ctr = Rscratch1;
      // Update backedge branch counter separately from invocations.
      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);

      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(invoke_ctr, R4_counters, Rscratch2, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(bumped_count, R4_counters, R14_bcp, Rdisp, Rscratch2);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(invoke_ctr, R4_counters, R14_bcp, Rdisp, Rscratch2);
        }
      }
    }

    __ bind(Lforward);
  }
  __ dispatch_next(vtos, 0, true);
}

// Helper function for if_cmp* methods below.
// Factored out common compare and branch code.
void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
  Label Lnot_taken;
  // Note: The condition code we get is the condition under which we
  // *fall through*! So we have to invert the CC here.

  if (is_jint) {
    if (cmp0) {
      __ cmpwi(CCR0, Rfirst, 0);
    } else {
      __ cmpw(CCR0, Rfirst, Rsecond);
    }
  } else {
    if (cmp0) {
      __ cmpdi(CCR0, Rfirst, 0);
    } else {
      __ cmpd(CCR0, Rfirst, Rsecond);
    }
  }
  branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);

  // Condition is true => take the bytecode branch.
  branch(false, false);

  // Condition is false => continue with the next bytecode.
  __ align(32, 12);
  __ bind(Lnot_taken);
  __ profile_not_taken_branch(Rscratch1, Rscratch2);
}

// Compare integer values with zero and fall through if CC holds, branch away otherwise.
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
}

// Compare integer values and fall through if CC holds, branch away otherwise.
//
// Interface:
//  - Rfirst: First operand (older stack value)
//  - tos:    Second operand (younger stack value)
void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_i(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_ptr(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
}

void TemplateTable::ret() {
  locals_index(R11_scratch1);
  __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);

  __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);

  __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
  __ add(R11_scratch1, R17_tos, R11_scratch1);
  __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, true);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);

  const Register Rindex    = R3_ARG1,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, R17_tos, Rindex);
  __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2);
  // Tos now contains the bci, compute the bcp from that.
  __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
  __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
  __ add(R14_bcp, Rscratch1, Rscratch2);
  __ dispatch_next(vtos, 0, true);
}

void TemplateTable::tableswitch() {
  transition(itos, vtos);

  Label Ldispatch, Ldefault_case;
  Register Rlow_byte        = R3_ARG1,
           Rindex           = Rlow_byte,
           Rhigh_byte       = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset
           Rscratch1        = R11_scratch1,
           Rscratch2        = R12_scratch2,
           Roffset          = R6_ARG4;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Load lo & hi.
  __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);

  // Check for default case (=index outside [low,high]).
  __ cmpw(CCR0, R17_tos, Rlow_byte);
  __ cmpw(CCR1, R17_tos, Rhigh_byte);
  __ blt(CCR0, Ldefault_case);
  __ bgt(CCR1, Ldefault_case);

  // Lookup dispatch offset.
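  // The next instructions compute the table entry, as a rough C sketch
  // (helper names informal, not actual VM functions):
  //   int32_t* aligned = align_up(bcp + 1, BytesPerInt); // Rdef_offset_addr
  //   // aligned[0] = default, aligned[1] = low, aligned[2] = high
  //   int32_t  disp    = aligned[3 + (tos - low)];       // stored big-endian
  //   bcp += disp;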
  __ sub(Rindex, R17_tos, Rlow_byte);
  __ extsw(Rindex, Rindex);
  __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
  __ sldi(Rindex, Rindex, LogBytesPerInt);
  __ addi(Rindex, Rindex, 3 * BytesPerInt);
#if defined(VM_LITTLE_ENDIAN)
  __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
  __ extsw(Roffset, Roffset);
#else
  __ lwax(Roffset, Rdef_offset_addr, Rindex);
#endif
  __ b(Ldispatch);

  __ bind(Ldefault_case);
  __ profile_switch_default(Rhigh_byte, Rscratch1);
  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);

  __ bind(Ldispatch);

  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos, 0, true);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

// Table switch using linear search through cases.
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is big-endian format here.
void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);

  Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
  Register Rcount           = R3_ARG1,
           Rcurrent_pair    = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
           Roffset          = R31,     // Might need to survive C call.
           Rvalue           = R12_scratch2,
           Rscratch         = R11_scratch1,
           Rcmp_value       = R17_tos;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Setup loop counter and limit.
  __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.

  __ mtctr(Rcount);
  __ cmpwi(CCR0, Rcount, 0);
  __ bne(CCR0, Lloop_entry);

  // Default case
  __ bind(Ldefault_case);
  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
  if (ProfileInterpreter) {
    __ profile_switch_default(Rdef_offset_addr, Rcount /* scratch */);
  }
  __ b(Lcontinue_execution);

  // Next iteration
  __ bind(Lsearch_loop);
  __ bdz(Ldefault_case);
  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
  __ bind(Lloop_entry);
  __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
  __ cmpw(CCR0, Rvalue, Rcmp_value);
  __ bne(CCR0, Lsearch_loop);

  // Found, load offset.
  __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
  // Calculate case index and profile
  __ mfctr(Rcurrent_pair);
  if (ProfileInterpreter) {
    __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
    __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr /*scratch*/, Rscratch);
  }

  __ bind(Lcontinue_execution);
  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos, 0, true);
}

// Table switch using binary search (value/offset pairs are ordered).
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is big-endian format here.
// So on little-endian machines, we have to reverse offset, count, and cmp value.
void TemplateTable::fast_binaryswitch() {

  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (inexisting)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  const Register Rkey     = R17_tos; // already set (tosca)
  const Register Rarray   = R3_ARG1;
  const Register Ri       = R4_ARG2;
  const Register Rj       = R5_ARG3;
  const Register Rh       = R6_ARG4;
  const Register Rscratch = R11_scratch1;

  const int log_entry_size = 3;
  const int entry_size = 1 << log_entry_size;

  Label found;

  // Find array start.
  __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
  __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));

  // initialize i & j
  __ li(Ri, 0);
  __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);

  // and start.
  Label entry;
  __ b(entry);

  // binary search loop
  { Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ srdi(Rh, Rh, 1);
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sldi(Rscratch, Rh, log_entry_size);
#if defined(VM_LITTLE_ENDIAN)
    __ lwbrx(Rscratch, Rscratch, Rarray);
#else
    __ lwzx(Rscratch, Rscratch, Rarray);
#endif

    // if (key < current value)
    //   Rj = Rh
    // else
    //   Ri = Rh
    Label Lgreater;
    __ cmpw(CCR0, Rkey, Rscratch);
    __ bge(CCR0, Lgreater);
    __ mr(Rj, Rh);
    __ b(entry);
    __ bind(Lgreater);
    __ mr(Ri, Rh);

    // while (i+1 < j)
    __ bind(entry);
    __ addi(Rscratch, Ri, 1);
    __ cmpw(CCR0, Rscratch, Rj);
    __ add(Rh, Ri, Rj); // start h = i + j >> 1;

    __ blt(CCR0, loop);
  }

  // End of binary search, result index is i (must check again!).
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mr(Rh, Ri); // Save index in i for profiling.
  }
  // Ri = value offset
  __ sldi(Ri, Ri, log_entry_size);
  __ add(Ri, Ri, Rarray);
  __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);

  Label not_found;
  // Ri = offset offset
  __ cmpw(CCR0, Rkey, Rscratch);
  __ beq(CCR0, not_found);
  // entry not found -> j = default offset
  __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ b(default_case);

  __ bind(not_found);
  // entry found -> j = offset
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);

  if (ProfileInterpreter) {
    __ b(continue_execution);
  }

  __ bind(default_case); // fall through (if not profiling)
  __ profile_switch_default(Ri, Rscratch);

  __ bind(continue_execution);

  __ extsw(Rj, Rj);
  __ add(R14_bcp, Rj, R14_bcp);
  __ dispatch_next(vtos, 0, true);
}

void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {

    Register Rscratch     = R11_scratch1,
             Rklass       = R12_scratch2,
             Rklass_flags = Rklass;
    Label Lskip_register_finalizer;

    // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
    assert(state == vtos, "only valid state");
    __ ld(R17_tos, 0, R18_locals);

    // Load klass of this obj.
    __ load_klass(Rklass, R17_tos);
    __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
    __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
    __ bfalse(CCR0, Lskip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);

    __ align(32, 12);
    __ bind(Lskip_register_finalizer);
  }

  if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
    Label no_safepoint;
    __ ld(R11_scratch1, in_bytes(Thread::polling_page_offset()), R16_thread);
    __ andi_(R11_scratch1, R11_scratch1, SafepointMechanism::poll_bit());
    __ beq(CCR0, no_safepoint);
    __ push(state);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
    __ pop(state);
    __ bind(no_safepoint);
  }

  // Move the result value into the correct register and remove memory stack frame.
  __ remove_activation(state, /* throw_monitor_exception */ true);
  // Restoration of lr done by remove_activation.
  switch (state) {
    // Narrow result if state is itos but result type is smaller.
    // Need to narrow in the return bytecode rather than in generate_return_entry
    // since compiled code callers expect the result to already be narrowed.
    case itos: __ narrow(R17_tos); /* fall through */
    case ltos:
    case atos: __ mr(R3_RET, R17_tos); break;
    case ftos:
    case dtos: __ fmr(F1_RET, F15_ftos); break;
    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
               // to get visible before the reference to the object gets stored anywhere.
               __ membar(Assembler::StoreStore); break;
    default : ShouldNotReachHere();
  }
  __ blr();
}

// ============================================================================
// Constant pool cache access
//
// Memory ordering:
//
// As in the C++ interpreter, we load the fields
//   - _indices
//   - _f12_oop
// with acquire semantics, because they are inspected to decide whether the
// cache entry is already resolved. We don't want loads to float above this
// check.
// See also comments in ConstantPoolCacheEntry::bytecode_1(),
// ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1().

// Call into the VM if call site is not yet resolved.
//
// Input regs:
//   - None, all passed regs are outputs.
//
// Returns:
//   - Rcache:  The const pool cache entry that contains the resolved result.
//   - Rresult: Either noreg or output for f1/f2.
//
// Kills:
//   - Rscratch
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {

  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  Label Lresolved, Ldone;

  Bytecodes::Code code = bytecode();
  switch (code) {
    case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
    case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  }

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  // We are resolved if the indices offset contains the current bytecode.
#if defined(VM_LITTLE_ENDIAN)
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
#else
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
#endif
  // Acquire by cmp-br-isync (see below).
  __ cmpdi(CCR0, Rscratch, (int)code);
  __ beq(CCR0, Lresolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ li(R4_ARG2, code);
  __ call_VM(noreg, entry, R4_ARG2, true);

  // Update registers with resolved info.
  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  __ b(Ldone);

  __ bind(Lresolved);
  __ isync(); // Order load wrt. succeeding loads.
  __ bind(Ldone);
}

// Load the constant pool cache entry at field accesses into registers.
// The Rcache and Rindex registers must be set before call.
// Input:
//   - Rcache, Rindex
// Output:
//   - Robj, Roffset, Rflags
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register Rindex /* unused on PPC64 */,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static = false) {
  assert_different_registers(Rcache, Rflags, Roffset);
  // assert(Rindex == noreg, "parameter not used on PPC64");

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
  if (is_static) {
    __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
    __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
    __ resolve_oop_handle(Robj);
    // Acquire not needed here. The following access has an address dependency on this value.
  }
}

// Load the constant pool cache entry at invokes into registers.
// Resolve if necessary.

// Input Registers:
//   - None, bcp is used, though
//
// Return registers:
//   - Rmethod       (f1 field or f2 if invokevirtual)
//   - Ritable_index (f2 field)
//   - Rflags        (flags field)
//
// Kills:
//   - R21
//
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register Rmethod,
                                               Register Ritable_index,
                                               Register Rflags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  // Determine constant pool cache field offsets.
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
  // Access constant pool cache fields.
  const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());

  Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.

  if (is_invokevfinal) {
    assert(Ritable_index == noreg, "register not used");
    // Already resolved.
    __ get_cache_and_index_at_bcp(Rcache, 1);
  } else {
    resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
  }

  __ ld(Rmethod, method_offset, Rcache);
  __ ld(Rflags, flags_offset, Rcache);

  if (Ritable_index != noreg) {
    __ ld(Ritable_index, index_offset, Rcache);
  }
}

// ============================================================================
// Field access

// Volatile variables demand their effects be made known to all CPUs
// in order. Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other. ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.
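// On PPC64 the templates below implement this, roughly, as (a sketch of the
// scheme used here, not a normative mapping):
//   volatile load:  sync; load; twi/cmp; isync      ("fence-load-acquire")
//   volatile store: lwsync; store                   (release-store)
// plus a trailing sync after volatile stores to cover the
// volatile-store-volatile-load case mentioned above (see the Lvolatile
// paths in putfield_or_static and fast_storefield).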
// The registers cache and index expected to be set before call.
// Correct values of the cache and index registers are preserved.
// Kills:
//   Rcache (if has_tos)
//   Rscratch
void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {

  assert_different_registers(Rcache, Rscratch);

  if (JvmtiExport::can_post_field_access()) {
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    Label Lno_field_access_post;

    // Check if post field access is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_access_post);

    // Post access enabled - do it!
    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      __ li(R17_tos, 0);
    } else {
      if (has_tos) {
        // The fast bytecode versions have obj ptr in register.
        // Thus, save object pointer before call_VM() clobbers it:
        // put object on tos where GC wants it.
        __ push_ptr(R17_tos);
      } else {
        // Load top of stack (do not pop the value off the stack).
        __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
      }
      __ verify_oop(R17_tos);
    }
    // tos:   object pointer or NULL if static
    // cache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
    if (!is_static && has_tos) {
      // Restore object pointer.
      __ pop_ptr(R17_tos);
      __ verify_oop(R17_tos);
    } else {
      // Cache is still needed to get class or obj.
      __ get_cache_and_index_at_bcp(Rcache, 1);
    }

    __ align(32, 12);
    __ bind(Lno_field_access_post);
  }
}

// kills R11_scratch1
void TemplateTable::pop_and_check_object(Register Roop) {
  Register Rtmp = R11_scratch1;

  assert_different_registers(Rtmp, Roop);
  __ pop_ptr(Roop);
  // For field access must check obj.
  __ null_check_throw(Roop, -1, Rtmp);
  __ verify_oop(Roop);
}

// PPC64: implement volatile loads as fence-load-acquire.
void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  Label Lacquire, Lisync;

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R22_tmp2,
                 Roffset       = R23_tmp3,
                 Rflags        = R31,
                 Rbtable       = R5_ARG3,
                 Rbc           = R6_ARG4,
                 Rscratch      = R12_scratch2;

  static address field_branch_table[number_of_states],
                 static_branch_table[number_of_states];

  address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table;

  // Get field offset.
  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, is_static, false);

  // Load after possible GC.
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  // Note: sync is needed before volatile load on PPC64.

  // Check field type.
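  // In plain C terms, the two flag extractions around here are (bit field
  // names as defined in ConstantPoolCacheEntry):
  //   is_vol    = (flags >> is_volatile_shift) & 1;
  //   tos_state = (flags >> tos_state_shift) & ((1 << tos_state_bits) - 1);
  // rldicl (rotate left doubleword then clear left) is exactly this
  // shift-and-mask idiom.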
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  __ cmpwi(CCR6, Rscratch, 1); // Volatile?
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  }
  __ ldx(Rbtable, Rbtable, Rflags);

  // Get the obj from stack.
  if (!is_static) {
    pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
  } else {
    __ verify_oop(Rclass_or_obj);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  }
  __ mtctr(Rbtable);
  __ bctr();

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x654);
#endif

  if (!is_static && rc == may_not_rewrite) {
    // We reuse the code from is_static. It's jumped to via the table above.
    return;
  }

#ifdef ASSERT
  // __ bind(Lvtos);
  address pc_before_fence = __ pc();
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  assert(branch_table[vtos] == 0, "can't compute twice");
  branch_table[vtos] = __ pc(); // non-volatile_entry point
  __ stop("vtos unexpected", 0x655);
#endif

  __ align(32, 28, 28); // Align load.
  // __ bind(Ldtos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[dtos] == 0, "can't compute twice");
  branch_table[dtos] = __ pc(); // non-volatile_entry point
  __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
  __ push(dtos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
  }
  {
    Label acquire_double;
    __ beq(CCR6, acquire_double); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ bind(acquire_double);
    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NAN.
  }

  __ align(32, 28, 28); // Align load.
  // __ bind(Lftos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ftos] == 0, "can't compute twice");
  branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
  __ push(ftos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch);
  }
  {
    Label acquire_float;
    __ beq(CCR6, acquire_float); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ bind(acquire_float);
    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NAN.
  }

  __ align(32, 28, 28); // Align load.
  // __ bind(Litos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[itos] == 0, "can't compute twice");
  branch_table[itos] = __ pc(); // non-volatile_entry point
  __ lwax(R17_tos, Rclass_or_obj, Roffset);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lltos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ltos] == 0, "can't compute twice");
  branch_table[ltos] = __ pc(); // non-volatile_entry point
  __ ldx(R17_tos, Rclass_or_obj, Roffset);
  __ push(ltos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lbtos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[btos] == 0, "can't compute twice");
  branch_table[btos] = __ pc(); // non-volatile_entry point
  __ lbzx(R17_tos, Rclass_or_obj, Roffset);
  __ extsb(R17_tos, R17_tos);
  __ push(btos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lztos); (same code as btos)
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ztos] == 0, "can't compute twice");
  branch_table[ztos] = __ pc(); // non-volatile_entry point
  __ lbzx(R17_tos, Rclass_or_obj, Roffset);
  __ push(ztos);
  if (!is_static && rc == may_rewrite) {
    // Use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lctos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ctos] == 0, "can't compute twice");
  branch_table[ctos] = __ pc(); // non-volatile_entry point
  __ lhzx(R17_tos, Rclass_or_obj, Roffset);
  __ push(ctos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lstos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[stos] == 0, "can't compute twice");
  branch_table[stos] = __ pc(); // non-volatile_entry point
  __ lhax(R17_tos, Rclass_or_obj, Roffset);
  __ push(stos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
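  // Note on the atos entry below: load_heap_oop performs, when
  // UseCompressedOops is on, roughly
  //   oop = heap_base + ((uint64_t)narrow_oop << narrow_oop_shift);
  // instead of a plain 64-bit load (a sketch only; the exact decoding
  // depends on the compressed oops mode in use).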
  // __ bind(Latos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
  __ verify_oop(R17_tos);
  __ push(atos);
  //__ dcbt(R17_tos); // prefetch
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
  }
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 12);
  __ bind(Lacquire);
  __ twi_0(R17_tos);
  __ bind(Lisync);
  __ isync(); // acquire

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "get initialization");
    //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

// The registers cache and index expected to be set before call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {

  assert_different_registers(Rcache, Rscratch, R6_ARG4);

  if (JvmtiExport::can_post_field_modification()) {
    Label Lno_field_mod_post;

    // Check if post field modification is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_mod_post);

    // Do the post
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    const Register Robj = Rscratch;

    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ li(Robj, 0);
    } else {
      // In case of the fast versions, value lives in registers => put it back on tos.
      int offs = Interpreter::expr_offset_in_bytes(0);
      Register base = R15_esp;
      switch (bytecode()) {
        case Bytecodes::_fast_aputfield: __ push_ptr(); offs += Interpreter::stackElementSize; break;
        case Bytecodes::_fast_iputfield: // Fall through
        case Bytecodes::_fast_bputfield: // Fall through
        case Bytecodes::_fast_zputfield: // Fall through
        case Bytecodes::_fast_cputfield: // Fall through
        case Bytecodes::_fast_sputfield: __ push_i(); offs += Interpreter::stackElementSize; break;
        case Bytecodes::_fast_lputfield: __ push_l(); offs += 2*Interpreter::stackElementSize; break;
        case Bytecodes::_fast_fputfield: __ push_f(); offs += Interpreter::stackElementSize; break;
        case Bytecodes::_fast_dputfield: __ push_d(); offs += 2*Interpreter::stackElementSize; break;
        default: {
          offs = 0;
          base = Robj;
          const Register Rflags = Robj;
          Label is_one_slot;
          // Life is harder. The stack holds the value on top, followed by the object.
          // We don't know the size of the value, though; it could be one or
          // two words depending on its type. As a result, we must find the
          // type to determine where the object is.
          __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian
          __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

          __ cmpwi(CCR0, Rflags, ltos);
          __ cmpwi(CCR1, Rflags, dtos);
          __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
          __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal);
          __ beq(CCR0, is_one_slot);
          __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
          __ bind(is_one_slot);
          break;
        }
      }
      __ ld(Robj, offs, base);
      __ verify_oop(Robj);
    }

    __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0));
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4);
    __ get_cache_and_index_at_bcp(Rcache, 1);

    // In case of the fast versions, value lives in registers => put it back on tos.
    switch (bytecode()) {
      case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
      case Bytecodes::_fast_iputfield: // Fall through
      case Bytecodes::_fast_bputfield: // Fall through
      case Bytecodes::_fast_zputfield: // Fall through
      case Bytecodes::_fast_cputfield: // Fall through
      case Bytecodes::_fast_sputfield: __ pop_i(); break;
      case Bytecodes::_fast_lputfield: __ pop_l(); break;
      case Bytecodes::_fast_fputfield: __ pop_f(); break;
      case Bytecodes::_fast_dputfield: __ pop_d(); break;
      default: break; // Nothin' to do.
    }

    __ align(32, 12);
    __ bind(Lno_field_mod_post);
  }
}

// PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  Label Lvolatile;

  const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
                 Rclass_or_obj = R31,      // Needs to survive C call.
                 Roffset       = R22_tmp2, // Needs to survive C call.
                 Rflags        = R3_ARG1,
                 Rbtable       = R4_ARG2,
                 Rscratch      = R11_scratch1,
                 Rscratch2     = R12_scratch2,
                 Rscratch3     = R6_ARG4,
                 Rbc           = Rscratch3;
  const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).

  static address field_rw_branch_table[number_of_states],
                 field_norw_branch_table[number_of_states],
                 static_branch_table[number_of_states];

  address* branch_table = is_static ? static_branch_table :
    (rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table);

  // Stack (grows up):
  //   value
  //   obj

  // Load the field offset.
  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
  jvmti_post_field_mod(Rcache, Rscratch, is_static);
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.

  // Check the field type.
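  // Entry point selection below follows the same two-entry-point trick as in
  // getfield_or_static: each type-specific entry is preceded by exactly one
  // release() instruction, so, roughly,
  //   entry = branch_table[tos_state] - (is_volatile ? BytesPerInstWord : 0)
  // enters one instruction early for volatile stores.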
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ cmpwi(CR_is_vol, Rscratch, 1); // Volatile?
  }
  __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  __ ldx(Rbtable, Rbtable, Rflags);

  __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  __ mtctr(Rbtable);
  __ bctr();

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x656);

  // __ bind(Lvtos);
  address pc_before_release = __ pc();
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  assert(branch_table[vtos] == 0, "can't compute twice");
  branch_table[vtos] = __ pc(); // non-volatile_entry point
  __ stop("vtos unexpected", 0x657);
#endif

  __ align(32, 28, 28); // Align pop.
  // __ bind(Ldtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[dtos] == 0, "can't compute twice");
  branch_table[dtos] = __ pc(); // non-volatile_entry point
  __ pop(dtos);
  if (!is_static) {
    pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
  }
  __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lftos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ftos] == 0, "can't compute twice");
  branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ pop(ftos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Litos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[itos] == 0, "can't compute twice");
  branch_table[itos] = __ pc(); // non-volatile_entry point
  __ pop(itos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stwx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lltos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ltos] == 0, "can't compute twice");
  branch_table[ltos] = __ pc(); // non-volatile_entry point
  __ pop(ltos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stdx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lbtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[btos] == 0, "can't compute twice");
  branch_table[btos] = __ pc(); // non-volatile_entry point
  __ pop(btos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stbx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lztos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ztos] == 0, "can't compute twice");
  branch_table[ztos] = __ pc(); // non-volatile_entry point
  __ pop(ztos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ andi(R17_tos, R17_tos, 0x1);
  __ stbx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lctos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ctos] == 0, "can't compute twice");
  branch_table[ctos] = __ pc(); // non-volatile_entry point
  __ pop(ctos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lstos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[stos] == 0, "can't compute twice");
  branch_table[stos] = __ pc(); // non-volatile_entry point
  __ pop(stos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Latos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  __ pop(atos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
  do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ align(32, 12);
    __ bind(Lvolatile);
    __ fence();
  }
  // fallthru: __ b(Lexit);

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "put initialization");
    //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

// See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
void TemplateTable::jvmti_post_fast_field_mod() {
  __ should_not_reach_here();
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);

  const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
                 Rclass_or_obj = R31,      // Needs to survive C call.
                 Roffset       = R22_tmp2, // Needs to survive C call.
                 Rflags        = R3_ARG1,
                 Rscratch      = R11_scratch1,
                 Rscratch2     = R12_scratch2,
                 Rscratch3     = R4_ARG2;
  const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).

  // Constant pool already resolved => Load flags and offset of field.
  __ get_cache_and_index_at_bcp(Rcache, 1);
  jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // Get the obj and the final store addr.
  pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
  {
    Label LnotVolatile;
    __ beq(CCR0, LnotVolatile);
    __ release();
    __ align(32, 12);
    __ bind(LnotVolatile);
  }

  // Do the store and fencing.
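  // The volatile protocol of this template, in rough pseudocode:
  //   if (is_volatile) release();   // lwsync emitted above, before the store
  //   store field;                  // type-specific store in the switch below
  //   if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu)
  //     fence();                    // trailing sync, see LVolatile below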
  switch (bytecode()) {
    case Bytecodes::_fast_aputfield:
      // Store into the field.
      do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
      break;

    case Bytecodes::_fast_iputfield:
      __ stwx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_lputfield:
      __ stdx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_zputfield:
      __ andi(R17_tos, R17_tos, 0x1); // boolean is true if LSB is 1
      // fall through to bputfield
    case Bytecodes::_fast_bputfield:
      __ stbx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_sputfield:
      __ sthx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_fputfield:
      __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_dputfield:
      __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
      break;

    default: ShouldNotReachHere();
  }

  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    Label LVolatile;
    __ beq(CR_is_vol, LVolatile);
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ align(32, 12);
    __ bind(LVolatile);
    __ fence();
  }
}

void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);

  Label LisVolatile;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R17_tos,
                 Roffset       = R22_tmp2,
                 Rflags        = R23_tmp3,
                 Rscratch      = R12_scratch2;

  // Constant pool already resolved. Get the field offset.
  __ get_cache_and_index_at_bcp(Rcache, 1);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, false, true);

  // Get the load address.
  __ null_check_throw(Rclass_or_obj, -1, Rscratch);

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  __ bne(CCR0, LisVolatile);

  switch (bytecode()) {
    case Bytecodes::_fast_agetfield:
    {
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_igetfield:
    {
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_lgetfield:
    {
      __ ldx(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ ldx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_bgetfield:
    {
      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
      __ extsb(R17_tos, R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ extsb(R17_tos, R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_cgetfield:
    {
      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_sgetfield:
    {
      __ lhax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lhax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_fgetfield:
    {
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    case Bytecodes::_fast_dgetfield:
    {
      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  Label LisVolatile;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R17_tos,
                 Roffset       = R22_tmp2,
                 Rflags        = R23_tmp3,
                 Rscratch      = R12_scratch2;

  __ ld(Rclass_or_obj, 0, R18_locals);

  // Constant pool already resolved. Get the field offset.
  __ get_cache_and_index_at_bcp(Rcache, 2);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches.

  // Needed to report exception at the correct bcp.
  __ addi(R14_bcp, R14_bcp, 1);

  // Get the load address.
  __ null_check_throw(Rclass_or_obj, -1, Rscratch);

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  __ bne(CCR0, LisVolatile);

  switch (state) {
    case atos:
    {
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case itos:
    {
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case ftos:
    {
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    default: ShouldNotReachHere();
  }
  __ addi(R14_bcp, R14_bcp, -1);
}

// ============================================================================
// Calls

// Common code for invoke
//
// Input:
//   - byte_no
//
// Output:
//   - Rmethod:   The method to invoke next.
//   - Rret_addr: The return address to return to.
//   - Rindex:    MethodType (invokehandle) or CallSite obj (invokedynamic)
//   - Rrecv:     Cache for "this" pointer, might be noreg if static call.
//   - Rflags:    Method flags from const pool cache.
//
// Kills:
//   - Rscratch1
//
void TemplateTable::prepare_invoke(int byte_no,
                                   Register Rmethod,   // linked method (or i-klass)
                                   Register Rret_addr, // return address
                                   Register Rindex,    // itable index, MethodType, etc.
                                   Register Rrecv,     // If caller wants to see it.
                                   Register Rflags,    // If caller wants to test it.
  switch(state) {
    case atos:
    {
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case itos:
    {
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case ftos:
    {
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    default: ShouldNotReachHere();
  }
  __ addi(R14_bcp, R14_bcp, -1);
}

// ============================================================================
// Calls

// Common code for invoke
//
// Input:
//   - byte_no
//
// Output:
//   - Rmethod:   The method to invoke next.
//   - Rret_addr: The return address to return to.
//   - Rindex:    MethodType (invokehandle) or CallSite obj (invokedynamic)
//   - Rrecv:     Cache for "this" pointer, might be noreg if static call.
//   - Rflags:    Method flags from const pool cache.
//
// Kills:
//   - Rscratch
//
void TemplateTable::prepare_invoke(int byte_no,
                                   Register Rmethod,  // linked method (or i-klass)
                                   Register Rret_addr,// return address
                                   Register Rindex,   // itable index, MethodType, etc.
                                   Register Rrecv,    // If caller wants to see it.
                                   Register Rflags,   // If caller wants to test it.
                                   Register Rscratch
                                   ) {
  // Determine flags.
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (Rrecv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");

  assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
  assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
  assert_different_registers(Rret_addr, Rscratch);

  load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);

  // Saving of SP done in call_from_interpreter.

  // Maybe push "appendix" to arguments.
  if (is_invokedynamic || is_invokehandle) {
    Label Ldone;
    __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
    __ beq(CCR0, Ldone);
    // Push "appendix" (MethodType, CallSite, etc.).
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ load_resolved_reference_at_index(Rscratch, Rindex);
    __ verify_oop(Rscratch);
    __ push_ptr(Rscratch);
    __ bind(Ldone);
  }

  // Load receiver if needed (after appendix is pushed so parameter size is correct).
  if (load_receiver) {
    const Register Rparam_count = Rscratch;
    __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
    __ load_receiver(Rparam_count, Rrecv);
    __ verify_oop(Rrecv);
  }

  // Get return address.
  {
    Register Rtable_addr = Rscratch;
    Register Rret_type = Rret_addr;
    address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);

    // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
    __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
    __ load_dispatch_table(Rtable_addr, (address*)table_addr);
    __ sldi(Rret_type, Rret_type, LogBytesPerWord);
    // Get return address.
    __ ldx(Rret_addr, Rtable_addr, Rret_type);
  }
}
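// Illustration of the return-address lookup in prepare_invoke (not generated
// code): assuming tos_state_shift == 28 and tos_state_bits == 4 as in
// cpCache.hpp, the rldicl extracts the TosState of the return value,
//   Rret_type = (Rflags >> 28) & 0xF;
// which is then scaled by 8 (LogBytesPerWord) and used to index the
// per-bytecode invoke return entry table:
//   Rret_addr = invoke_return_entry_table_for(code)[Rret_type];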
// Helper for virtual calls. Load target out of vtable and jump off!
// Kills all passed registers.
void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {

  assert_different_registers(Rrecv_klass, Rtemp, Rret);
  const Register Rtarget_method = Rindex;

  // Get target method & entry point.
  const int base = in_bytes(Klass::vtable_start_offset());
  // Calc vtable addr: scale the vtable index by the vtable entry size (8 bytes).
  __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size_in_bytes()));
  // Load target.
  __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
  __ ldx(Rtarget_method, Rindex, Rrecv_klass);
  // Argument and return type profiling.
  __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
  __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
}

// Virtual or final call. Final calls are rewritten on the fly to run through
// "fast_invokevfinal" next time.
void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);

  Register Rtable_addr = R11_scratch1,
           Rret_type = R12_scratch2,
           Rret_addr = R5_ARG3,
           Rflags = R22_tmp2,             // Should survive C call.
           Rrecv = R3_ARG1,
           Rrecv_klass = Rrecv,
           Rvtableindex_or_method = R31,  // Should survive C call.
           Rnum_params = R4_ARG2,
           Rnew_bc = R6_ARG4;

  Label LnotFinal;

  load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) {
    patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
  }
  invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);

  __ align(32, 12);
  __ bind(LnotFinal);
  // Load "this" pointer (receiver).
  __ rldicl(Rnum_params, Rflags, 64, 48);
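  // Illustration (not generated code): rotating a 64-bit register by 64 is a
  // no-op, and clearing the 48 high bits keeps the low 16 flag bits, so this
  // computes
  //   Rnum_params = Rflags & 0xFFFF;
  // For method entries these bits hold the parameter slot count, which for
  // virtual calls includes the receiver.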
  __ load_receiver(Rnum_params, Rrecv);
  __ verify_oop(Rrecv);

  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);
  __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
  __ load_klass(Rrecv_klass, Rrecv);
  __ verify_klass_ptr(Rrecv_klass);
  __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);

  generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f2_byte, "use this argument");
  Register Rflags = R22_tmp2,
           Rmethod = R31;
  load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
  invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {

  assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);

  // Load receiver from stack slot.
  Register Rrecv = Rscratch2;
  Register Rnum_params = Rrecv;

  __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
  __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);

  // Get return address.
  Register Rtable_addr = Rscratch1,
           Rret_addr = Rflags,
           Rret_type = Rret_addr;
  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);
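  // Note on the register aliasing above: Rret_type aliases Rret_addr, which in
  // turn aliases Rflags. The ldx overwrites the flags with the return address;
  // the flags are dead from this point on.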

  // Load receiver and receiver NULL check.
  __ load_receiver(Rnum_params, Rrecv);
  __ null_check_throw(Rrecv, -1, Rscratch1);

  __ profile_final_call(Rrecv, Rscratch1);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);

  // Do the call.
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
}

void TemplateTable::invokespecial(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3,
           Rreceiver = R6_ARG4,
           Rmethod = R31;

  prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);

  // Receiver NULL check.
  __ null_check_throw(Rreceiver, -1, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokestatic(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3;

  prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
                                                  Register Rret,
                                                  Register Rflags,
                                                  Register Rmethod,
                                                  Register Rtemp1,
                                                  Register Rtemp2) {

  assert_different_registers(Rmethod, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
  Label LnotFinal;

  // Check for vfinal.
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  Register Rscratch = Rflags; // Rflags is dead now.

  // Final call case.
  __ profile_final_call(Rtemp1, Rscratch);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch, Rrecv_klass /* scratch */, true);
  // Do the final call - the index (f2) contains the method.
  __ call_from_interpreter(Rmethod, Rret, Rscratch, Rrecv_klass /* scratch */);

  // Non-final call case.
  __ bind(LnotFinal);
  __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
  generate_vtable_call(Rrecv_klass, Rmethod, Rret, Rscratch);
}

void TemplateTable::invokeinterface(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  const Register Rscratch1        = R11_scratch1,
                 Rscratch2        = R12_scratch2,
                 Rmethod          = R6_ARG4,
                 Rmethod2         = R9_ARG7,
                 Rinterface_klass = R5_ARG3,
                 Rret_addr        = R8_ARG6,
                 Rindex           = R10_ARG8,
                 Rreceiver        = R3_ARG1,
                 Rrecv_klass      = R4_ARG2,
                 Rflags           = R7_ARG5;

  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rmethod, Rreceiver, Rflags, Rscratch1);

  // Get receiver klass.
  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch2);
  __ load_klass(Rrecv_klass, Rreceiver);

  // Check corner case object method.
  Label LobjectMethod, L_no_such_interface, Lthrow_ame;
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  __ btrue(CCR0, LobjectMethod);

  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2,
                             L_no_such_interface, /*return_method=*/false);

  __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);

  // Find entry point to call.

  // Get declaring interface class from method.
  __ ld(Rinterface_klass, in_bytes(Method::const_offset()), Rmethod);
  __ ld(Rinterface_klass, in_bytes(ConstMethod::constants_offset()), Rinterface_klass);
  __ ld(Rinterface_klass, ConstantPool::pool_holder_offset_in_bytes(), Rinterface_klass);

  // Get itable index from method.
  __ lwa(Rindex, in_bytes(Method::itable_index_offset()), Rmethod);
  __ subfic(Rindex, Rindex, Method::itable_index_max);
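  // Illustration (not generated code): an interface method stores its itable
  // index in encoded form (see Method::itable_index()). Assuming the encoding
  // is itable_index_max - index, the subfic above recovers the index:
  //   Rindex = Method::itable_index_max - encoded_value;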
  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rmethod2, Rscratch1, Rscratch2,
                             L_no_such_interface);

  __ cmpdi(CCR0, Rmethod2, 0);
  __ beq(CCR0, Lthrow_ame);
  // Found entry. Jump off!
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true);
  //__ profile_called_method(Rindex, Rscratch1);
  __ call_from_interpreter(Rmethod2, Rret_addr, Rscratch1, Rscratch2);

  // Vtable entry was NULL => Throw abstract method error.
  __ bind(Lthrow_ame);
  // Pass arguments for generating a verbose error message.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
          Rrecv_klass, Rmethod);

  // Interface was not found => Throw incompatible class change error.
  __ bind(L_no_such_interface);
  // Pass arguments for generating a verbose error message.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
          Rrecv_klass, Rinterface_klass);
  DEBUG_ONLY( __ should_not_reach_here(); )

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
  // The invokeinterface was rewritten to an invokevirtual, hence we have
  // to handle this corner case. This code isn't produced by javac, but could
  // be produced by another compliant java compiler.
  __ bind(LobjectMethod);
  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rmethod, Rscratch1, Rscratch2);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags    = R4_ARG2,
                 Rmethod   = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);

  // Profile this call.
  __ profile_call(Rscratch1, Rscratch2);

  // Off we go. With the new method handles, we don't jump to a method handle
  // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
  // to be the call site object the bootstrap method returned. This is passed to a
  // "link" method which does the dispatch (most likely just grabs the MH stored
  // inside the call site and does an invokehandle).
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags    = R4_ARG2,
                 Rrecv     = R5_ARG3,
                 Rmethod   = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
  __ verify_method_ptr(Rmethod);
  __ null_check_throw(Rrecv, -1, Rscratch2);

  __ profile_final_call(Rrecv, Rscratch1);

  // Still no call from handle => We call the method handle interpreter here.
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

// =============================================================================
// Allocation

// Puts allocated obj ref onto the expression stack.
void TemplateTable::_new() {
  transition(vtos, atos);

  Label Lslow_case,
        Ldone;

  const Register RallocatedObject = R17_tos,
                 RinstanceKlass   = R9_ARG7,
                 Rscratch         = R11_scratch1,
                 Roffset          = R8_ARG6,
                 Rinstance_size   = Roffset,
                 Rcpool           = R4_ARG2,
                 Rtags            = R3_ARG1,
                 Rindex           = R5_ARG3;

  // --------------------------------------------------------------------------
  // Check if fast case is possible.

  // Load pointers to const pool and const pool's tags array.
  __ get_cpool_and_tags(Rcpool, Rtags);
  // Load index of constant pool entry.
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  // Note: compared to other architectures, PPC's implementation always goes
  // to the slow path if the TLAB allocation fails.
  if (UseTLAB) {
    // Make sure the class we're about to instantiate has been resolved.
    // This is done before loading the InstanceKlass to be consistent with the
    // order in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
    __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
    __ lbzx(Rtags, Rindex, Rtags);

    __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
    __ bne(CCR0, Lslow_case);

    // Get instanceKlass.
    __ sldi(Roffset, Rindex, LogBytesPerWord);
    __ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass);

    // Make sure klass is fully initialized and get instance_size.
    __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
    __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);

    __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
    // Make sure klass has no finalizer and is not abstract, not an interface, and not java/lang/Class.
    __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?

    __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized?
    __ beq(CCR0, Lslow_case);

    // --------------------------------------------------------------------------
    // Fast case:
    // Allocate the instance.
    // 1) Try to allocate in the TLAB.
    // 2) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
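    // In C-like terms the TLAB fast path below is roughly (illustration only):
    //   HeapWord* obj     = thread->tlab_top;
    //   HeapWord* new_top = obj + instance_size;
    //   if (new_top > thread->tlab_end) goto slow_case;
    //   thread->tlab_top = new_top;
    // No CAS is needed because the TLAB is thread-local.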
    Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
    Register RnewTopValue = R6_ARG4;
    Register RendValue    = R7_ARG5;

    // Check if we can allocate in the TLAB.
    __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
    __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);

    __ add(RnewTopValue, Rinstance_size, RoldTopValue);

    // If there is enough space, we do not CAS and do not clear.
    __ cmpld(CCR0, RnewTopValue, RendValue);
    __ bgt(CCR0, Lslow_case);

    __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);

    if (!ZeroTLAB) {
      // --------------------------------------------------------------------------
      // Init1: Zero out newly allocated memory.
      // Initialize remaining object fields.
      Register Rbase = Rtags;
      __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
      __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
      __ srdi(Rinstance_size, Rinstance_size, 3);

      // Clear out object skipping header. Also takes care of the zero length case.
      __ clear_memory_doubleword(Rbase, Rinstance_size);
    }
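    // Illustration of the size computation above (not generated code): for
    // instance klasses that pass the slow-path check, the layout helper is the
    // object size in bytes. With sizeof(oopDesc) == 16 on LP64, a 24-byte
    // instance yields
    //   (24 + 7 - 16) >> 3 == 1
    // doubleword to clear: the 8 bytes of payload behind the header.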

    // --------------------------------------------------------------------------
    // Init2: Initialize the header: mark, klass
    // Init mark.
    if (UseBiasedLocking) {
      __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
    } else {
      __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
    }
    __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);

    // Init klass.
    __ store_klass_gap(RallocatedObject);
    __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)

    // Check and trigger dtrace event.
    SkipIfEqualZero::skip_to_label_if_equal_zero(_masm, Rscratch, &DTraceAllocProbes, Ldone);
    __ push(atos);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
    __ pop(atos);

    __ b(Ldone);
  }

  // --------------------------------------------------------------------------
  // slow case
  __ bind(Lslow_case);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);

  // continue
  __ bind(Ldone);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::newarray() {
  transition(itos, atos);

  __ lbz(R4, 1, R14_bcp);
  __ extsw(R5, R17_tos);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4 /* type */, R5 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  __ get_constant_pool(R4);
  __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
  __ extsw(R6, R17_tos); // size
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

// Allocate a multi dimensional array.
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register Rptr = R31; // Needs to survive C call.

  // Compute ndims * wordSize, i.e. the number of bytes the dimensions occupy on the expression stack.
  __ lbz(Rptr, 3, R14_bcp);
  __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
  // Esp points past the last dimension, so set R4 to the address of the first dimension.
  __ add(R4, Rptr, R15_esp);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
  // Pop all dimensions off the stack.
  __ add(R15_esp, Rptr, R15_esp);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);

  __ verify_oop(R17_tos);
  __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
  __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
}

// ============================================================================
// Typechecks

void TemplateTable::checkcast() {
  transition(atos, atos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset         = R6_ARG4,
           RobjKlass       = R4_ARG2,
           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
           Rcpool          = R11_scratch1,
           Rtags           = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" the bytecode.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);

  // Do the checkcast.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone if the check succeeds, falls through on failure.
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);

  // Not a subtype, so we must throw an exception.
  // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}
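// Sketch of what the gen_subtype_check() calls in checkcast (above) and
// instanceof (below) emit (illustration only, see MacroAssembler): a quick
// pointer-equality check against the specified klass plus a check of the
// superclass display and, if needed, a scan of the secondary supers array.
// The emitted code branches to the given label on success and falls through
// on failure.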
// Output:
//   - tos == 0: Obj was null or not an instance of class.
//   - tos == 1: Obj was an instance of class.
void TemplateTable::instanceof() {
  transition(atos, itos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset         = R6_ARG4,
           RobjKlass       = R4_ARG2,
           RspecifiedKlass = R5_ARG3,
           Rcpool          = R11_scratch1,
           Rtags           = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" instanceof.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);

  // Do the type check.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone on success, falls through on failure.
  __ li(R17_tos, 1);
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
  __ li(R17_tos, 0);

  if (ProfileInterpreter) {
    __ b(Ldone);
  }

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}
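// Note on instanceof's result trick above: R17_tos is preloaded with 1 before
// the subtype check. On success the check branches directly to Ldone, so only
// the failure path falls through and overwrites the result with 0.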
// =============================================================================
// Breakpoints

void TemplateTable::_breakpoint() {
  transition(vtos, vtos);

  // Get the unpatched byte code.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
  __ mr(R31, R3_RET);

  // Post the breakpoint event.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);

  // Complete the execution of original bytecode.
  __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
}

// =============================================================================
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // Exception oop is in tos.
  __ verify_oop(R17_tos);

  __ null_check_throw(R17_tos, -1, R11_scratch1);

  // The throw-exception interpreter entry expects the exception oop in R3.
  __ mr(R3_RET, R17_tos);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
  __ mtctr(R11_scratch1);
  __ bctr();
}

// =============================================================================
// Synchronization
// Searches the basic object lock list on the stack for a free slot
// and uses it to lock the object in tos.
//
// Recursive locking is enabled by exiting the search if the same
// object is already found in the list. Thus, a new basic object lock
// is allocated "higher up" in the stack and is therefore found first
// at the next monitor exit.
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  __ verify_oop(R17_tos);

  Register Rcurrent_monitor  = R11_scratch1,
           Rcurrent_obj      = R12_scratch2,
           Robj_to_lock      = R17_tos,
           Rscratch1         = R3_ARG1,
           Rscratch2         = R4_ARG2,
           Rscratch3         = R5_ARG3,
           Rcurrent_obj_addr = R6_ARG4;

  // ------------------------------------------------------------------------------
  // Null pointer exception.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until monitorenter returns true).

  // ------------------------------------------------------------------------------
  // Find a free slot in the monitor block.
  Label Lfound, Lexit, Lallocate_new;
  ConditionRegister found_free_slot = CCR0,
                    found_same_obj  = CCR1,
                    reached_limit   = CCR6;
  {
    Label Lloop, Lentry;
    Register Rlimit = Rcurrent_monitor;

    // Set up search loop - start with topmost monitor.
    __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);

    __ ld(Rlimit, 0, R1_SP);
    __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base

    // Check if any slot is present => shortcut to allocation if not.
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ bgt(reached_limit, Lallocate_new);

    // Pre-load topmost slot.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // The search loop.
    __ bind(Lloop);
    // Found free slot?
    __ cmpdi(found_free_slot, Rcurrent_obj, 0);
    // Is this entry for the same obj? If so, stop the search and take the found
    // free slot or allocate a new one to enable recursive locking.
    __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ beq(found_free_slot, Lexit);
    __ beq(found_same_obj, Lallocate_new);
    __ bgt(reached_limit, Lallocate_new);
    // Check if last allocated BasicLockObj reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ b(Lloop);
  }
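  // The search above is, in C-like terms (illustration only):
  //   for (slot = monitor_top; slot <= monitor_base; slot += slot_size) {
  //     if (slot->obj == NULL)        goto Lexit;          // free slot, reuse it
  //     if (slot->obj == obj_to_lock) goto Lallocate_new;  // recursive lock
  //   }
  //   goto Lallocate_new;                                  // list exhausted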

  // ------------------------------------------------------------------------------
  // Check if we found a free slot.
  __ bind(Lexit);

  __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
  __ b(Lfound);

  // We didn't find a free BasicObjLock => allocate one.
  __ align(32, 12);
  __ bind(Lallocate_new);
  __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
  __ mr(Rcurrent_monitor, R26_monitor);
  __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());

  // ------------------------------------------------------------------------------
  // We now have a slot to lock.
  __ bind(Lfound);

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ addi(R14_bcp, R14_bcp, 1);

  __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
  __ lock_object(Rcurrent_monitor, Robj_to_lock);

  // Check if there's enough space on the stack for the monitors after locking.
  // This emits a single store.
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}

void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(R17_tos);

  Register Rcurrent_monitor  = R11_scratch1,
           Rcurrent_obj      = R12_scratch2,
           Robj_to_lock      = R17_tos,
           Rcurrent_obj_addr = R3_ARG1,
           Rlimit            = R4_ARG2;
  Label Lfound, Lillegal_monitor_state;

  // Check corner case: unbalanced monitorEnter / Exit.
  __ ld(Rlimit, 0, R1_SP);
  __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base

  // Null pointer check.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  __ cmpld(CCR0, R26_monitor, Rlimit);
  __ bgt(CCR0, Lillegal_monitor_state);

  // Find the corresponding slot in the monitors stack section.
  {
    Label Lloop;

    // Start with topmost monitor.
    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
    __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    __ bind(Lloop);
    // Is this entry for the same obj?
    __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
    __ beq(CCR0, Lfound);

    // Check if last allocated BasicLockObj reached.

    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ ble(CCR0, Lloop);
  }

  // Fell through without finding the basic obj lock => throw up!
  __ bind(Lillegal_monitor_state);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  __ align(32, 12);
  __ bind(Lfound);
  __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
          -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ unlock_object(Rcurrent_monitor);
}

// ============================================================================
// Wide bytecodes

// Wide instructions. Simply redirects to the wide entry point for that instruction.
void TemplateTable::wide() {
  transition(vtos, vtos);

  const Register Rtable = R11_scratch1,
                 Rindex = R12_scratch2,
                 Rtmp   = R0;

  __ lbz(Rindex, 1, R14_bcp);

  __ load_dispatch_table(Rtable, Interpreter::_wentry_point);

  __ slwi(Rindex, Rindex, LogBytesPerWord);
  __ ldx(Rtmp, Rtable, Rindex);
  __ mtctr(Rtmp);
  __ bctr();
  // Note: the bcp increment step is part of the individual wide bytecode implementations.
}
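
// Illustration (not generated code): for e.g. 'wide iload' the bytes at the
// bcp are [wide][iload][indexbyte1][indexbyte2]. The code above fetches the
// second byte (the widened opcode), scales it by 8 and jumps through
// Interpreter::_wentry_point, i.e.
//   goto _wentry_point[*(bcp + 1)];
// with the bcp still pointing at the 'wide' prefix.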