/*
 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants is possible at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,   // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
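          // Storing null cannot create an inter-region reference, so the
          // post barrier (card mark) is not needed here; only the pre
          // barrier above had to record the old value.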
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
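      // The put_code byte lives in the ConstantPoolCacheEntry::_indices
      // word; as long as it is still zero (unresolved), the compare below
      // skips the patch and leaves the slow bytecode in place.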
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
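  // The immediate operand at bcp + 1 is a signed byte. lbz zero-extends,
  // so the explicit extsb below restores the sign.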
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label is_null;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null);
  __ verify_oop(R17_tos);
  __ dispatch_epilog(atos, Bytecodes::length_for(bytecode()));

  __ bind(is_null);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
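  // The VM call resolves the constant and returns the oop in R17_tos,
  // so we simply fall through to the normal dispatch afterwards.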
  __ call_VM(R17_tos, entry, R3_ARG1);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
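    // Rewrite target selection below:
    //   next == _fast_iload -> _fast_iload2,
    //   next == _caload     -> _fast_icaload,
    //   otherwise           -> _fast_iload.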
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable type long from locals area to TOS cache register.
// Local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because bcp points to the wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
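// The local index is taken from the original iload operand at bcp + 1;
// the loaded int then indexes the char array, as in caload.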
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs && rc == may_rewrite) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
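    // Rewrite mapping below:
    //   _fast_igetfield -> _fast_iaccess_0,
    //   _fast_agetfield -> _fast_aaccess_0,
    //   _fast_fgetfield -> _fast_faccess_0,
    //   anything else   -> _fast_aload_0.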
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode, which might call the VM and GC might change the oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}
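// Note: index_check pops the array oop into Rarray, range-checks Rindex
// against the array length, and leaves array + (index << shift) in its last
// register argument; the header offset is then folded into the store's
// displacement (see the interpreter macro assembler for details).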

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack and...
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31; // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
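  // The three operands were read via expr_offset_in_bytes() rather than
  // popped, so they stayed visible on the expression stack across the
  // store-check paths above; release all three slots in one adjustment.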
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  __ pop_ptr(Rarray);
  // tos: val

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(Rscratch, Rarray);
  __ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch);
  int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit());
  __ testbitdi(CCR0, R0, Rscratch, diffbit);
  Label L_skip;
  __ bfalse(CCR0, L_skip);
  __ andi(R17_tos, R17_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ index_check_without_pop(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,      R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2,  R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
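  // Rotate the three top slots in place (a, b, c -> c, a, b), then push c
  // again on top: three loads, three stores and one push suffice.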
  __ ld(Rc, Interpreter::stackElementSize,      R15_esp); // load c
  __ ld(Ra, Interpreter::stackElementSize * 3,  R15_esp); // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2,  R15_esp); // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize,     R15_esp); // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,      R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2,  R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3,  R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3,  R15_esp);
  __ ld(Rd, Interpreter::stackElementSize,      R15_esp);
  __ std(Rb, Interpreter::stackElementSize,     R15_esp); // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4,  R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2,  R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,      R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2,  R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // tos      = number of bits to shift
  // Rscratch = value to shift
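  // The JVM spec masks 32-bit shift distances to their low 5 bits;
  // rldicl(..., 64-5) in the shift cases below clears everything above
  // bit 4 before shifting.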
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:  __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minlong/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);           // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.
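  // Rindex now holds the address of the local slot, so the incremented
  // value can be stored straight back through it.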
  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
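        // (fcfid rounds long->double and frsp rounds double->float; this
        // double rounding can differ from a single rounding to float, so
        // call SharedRuntime::l2f instead.)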
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NaN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NaN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
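  // Common code for the goto/jsr family and the taken path of all
  // conditional branches: read the displacement, bump bcp, and take care
  // of backedge counting and OSR for backward branches.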
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in R17_tos.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  // Bump bytecode pointer by displacement (take the branch).
  __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    Label Lforward;
    __ dispatch_prolog(vtos);

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        if (UseOnStackReplacement) {
          __ and_(Rscratch3, Rscratch2, Rscratch3);
          __ bne(CCR0, Lforward);
          __ b(Loverflow);
        } else {
          __ b(Lforward);
        }
      }

      // If there's no MDO, increment counter in method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters);
      if (UseOnStackReplacement) {
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
      } else {
        __ b(Lforward);
      }
      __ bind(Loverflow);

      // Notify point for loop, pass branch bytecode.
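      // R14_bcp was already advanced by Rdisp above, so subtracting the
      // displacement again recovers the bcp of the branch bytecode itself.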
      __ subf(R4_ARG2, Rdisp, R14_bcp); // Compute branch bytecode (previous bcp).
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

      // Was an OSR adapter generated?
      __ cmpdi(CCR0, R3_RET, 0);
      __ beq(CCR0, Lforward);

      // Has the nmethod been invalidated already?
      __ lbz(R0, nmethod::state_offset(), R3_RET);
      __ cmpwi(CCR0, R0, nmethod::in_use);
      __ bne(CCR0, Lforward);

      // Migrate the interpreter frame off of the stack.
      // We can use all registers because we will not return to interpreter from this point.

      // Save nmethod.
      const Register osr_nmethod = R31;
      __ mr(osr_nmethod, R3_RET);
      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
      __ reset_last_Java_frame();
      // OSR buffer is in ARG1.

      // Remove the interpreter frame.
      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

      // Jump to the osr code.
      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
      __ mtlr(R0);
      __ mtctr(R11_scratch1);
      __ bctr();

    } else {

      const Register invoke_ctr = Rscratch1;
      // Update Backedge branch separately from invocations.
      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);

      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(invoke_ctr, R4_counters, Rscratch2, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(bumped_count, R4_counters, R14_bcp, Rdisp, Rscratch2);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(invoke_ctr, R4_counters, R14_bcp, Rdisp, Rscratch2);
        }
      }
    }

    __ bind(Lforward);
    __ dispatch_epilog(vtos);

  } else {
    __ dispatch_next(vtos);
  }
}

// Helper function for if_cmp* methods below.
// Factored out common compare and branch code.
void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
  Label Lnot_taken;
  // Note: The condition code we get is the condition under which we
  // *fall through*! So we have to invert the CC here.

  if (is_jint) {
    if (cmp0) {
      __ cmpwi(CCR0, Rfirst, 0);
    } else {
      __ cmpw(CCR0, Rfirst, Rsecond);
    }
  } else {
    if (cmp0) {
      __ cmpdi(CCR0, Rfirst, 0);
    } else {
      __ cmpd(CCR0, Rfirst, Rsecond);
    }
  }
  branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);

  // Condition is false => Jump!
  branch(false, false);

  // Condition is not true => Continue.
  __ align(32, 12);
  __ bind(Lnot_taken);
  __ profile_not_taken_branch(Rscratch1, Rscratch2);
}

// Compare integer values with zero and fall through if CC holds, branch away otherwise.
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
}

// Compare integer values and fall through if CC holds, branch away otherwise.
1793 // 1794 // Interface: 1795 // - Rfirst: First operand (older stack value) 1796 // - tos: Second operand (younger stack value) 1797 void TemplateTable::if_icmp(Condition cc) { 1798 transition(itos, vtos); 1799 1800 const Register Rfirst = R0, 1801 Rsecond = R17_tos; 1802 1803 __ pop_i(Rfirst); 1804 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false); 1805 } 1806 1807 void TemplateTable::if_nullcmp(Condition cc) { 1808 transition(atos, vtos); 1809 1810 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true); 1811 } 1812 1813 void TemplateTable::if_acmp(Condition cc) { 1814 transition(atos, vtos); 1815 1816 const Register Rfirst = R0, 1817 Rsecond = R17_tos; 1818 1819 __ pop_ptr(Rfirst); 1820 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false); 1821 } 1822 1823 void TemplateTable::ret() { 1824 locals_index(R11_scratch1); 1825 __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1); 1826 1827 __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2); 1828 1829 __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method); 1830 __ add(R11_scratch1, R17_tos, R11_scratch1); 1831 __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset())); 1832 __ dispatch_next(vtos); 1833 } 1834 1835 void TemplateTable::wide_ret() { 1836 transition(vtos, vtos); 1837 1838 const Register Rindex = R3_ARG1, 1839 Rscratch1 = R11_scratch1, 1840 Rscratch2 = R12_scratch2; 1841 1842 locals_index_wide(Rindex); 1843 __ load_local_ptr(R17_tos, R17_tos, Rindex); 1844 __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2); 1845 // Tos now contains the bci, compute the bcp from that. 1846 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method); 1847 __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset())); 1848 __ add(R14_bcp, Rscratch1, Rscratch2); 1849 __ dispatch_next(vtos); 1850 } 1851 1852 void TemplateTable::tableswitch() { 1853 transition(itos, vtos); 1854 1855 Label Ldispatch, Ldefault_case; 1856 Register Rlow_byte = R3_ARG1, 1857 Rindex = Rlow_byte, 1858 Rhigh_byte = R4_ARG2, 1859 Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset 1860 Rscratch1 = R11_scratch1, 1861 Rscratch2 = R12_scratch2, 1862 Roffset = R6_ARG4; 1863 1864 // Align bcp. 1865 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1866 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1867 1868 // Load lo & hi. 1869 __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1870 __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned); 1871 1872 // Check for default case (=index outside [low,high]). 1873 __ cmpw(CCR0, R17_tos, Rlow_byte); 1874 __ cmpw(CCR1, R17_tos, Rhigh_byte); 1875 __ blt(CCR0, Ldefault_case); 1876 __ bgt(CCR1, Ldefault_case); 1877 1878 // Lookup dispatch offset. 
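// For reference, the complete tableswitch dispatch (including the range check
// above) is, in C-like pseudocode (a sketch; entries are 4-byte big-endian
// words starting at the aligned address held in Rdef_offset_addr):
//
//   int* t  = (int*)aligned_bcp;                // t[0]=default, t[1]=low, t[2]=high
//   int  off = (key < t[1] || key > t[2]) ? t[0]
//            : t[3 + (key - t[1])];             // case offsets start at t[3]
//   bcp += off;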
1879 __ sub(Rindex, R17_tos, Rlow_byte); 1880 __ extsw(Rindex, Rindex); 1881 __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2); 1882 __ sldi(Rindex, Rindex, LogBytesPerInt); 1883 __ addi(Rindex, Rindex, 3 * BytesPerInt); 1884 #if defined(VM_LITTLE_ENDIAN) 1885 __ lwbrx(Roffset, Rdef_offset_addr, Rindex); 1886 __ extsw(Roffset, Roffset); 1887 #else 1888 __ lwax(Roffset, Rdef_offset_addr, Rindex); 1889 #endif 1890 __ b(Ldispatch); 1891 1892 __ bind(Ldefault_case); 1893 __ profile_switch_default(Rhigh_byte, Rscratch1); 1894 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1895 1896 __ bind(Ldispatch); 1897 1898 __ add(R14_bcp, Roffset, R14_bcp); 1899 __ dispatch_next(vtos); 1900 } 1901 1902 void TemplateTable::lookupswitch() { 1903 transition(itos, itos); 1904 __ stop("lookupswitch bytecode should have been rewritten"); 1905 } 1906 1907 // Table switch using linear search through cases. 1908 // Bytecode stream format: 1909 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1910 // Note: Everything is big-endian format here. 1911 void TemplateTable::fast_linearswitch() { 1912 transition(itos, vtos); 1913 1914 Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case; 1915 Register Rcount = R3_ARG1, 1916 Rcurrent_pair = R4_ARG2, 1917 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset. 1918 Roffset = R31, // Might need to survive C call. 1919 Rvalue = R12_scratch2, 1920 Rscratch = R11_scratch1, 1921 Rcmp_value = R17_tos; 1922 1923 // Align bcp. 1924 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1925 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1926 1927 // Setup loop counter and limit. 1928 __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1929 __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair. 1930 1931 __ mtctr(Rcount); 1932 __ cmpwi(CCR0, Rcount, 0); 1933 __ bne(CCR0, Lloop_entry); 1934 1935 // Default case 1936 __ bind(Ldefault_case); 1937 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1938 if (ProfileInterpreter) { 1939 __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */); 1940 } 1941 __ b(Lcontinue_execution); 1942 1943 // Next iteration 1944 __ bind(Lsearch_loop); 1945 __ bdz(Ldefault_case); 1946 __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt); 1947 __ bind(Lloop_entry); 1948 __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned); 1949 __ cmpw(CCR0, Rvalue, Rcmp_value); 1950 __ bne(CCR0, Lsearch_loop); 1951 1952 // Found, load offset. 1953 __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed); 1954 // Calculate case index and profile 1955 __ mfctr(Rcurrent_pair); 1956 if (ProfileInterpreter) { 1957 __ sub(Rcurrent_pair, Rcount, Rcurrent_pair); 1958 __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch); 1959 } 1960 1961 __ bind(Lcontinue_execution); 1962 __ add(R14_bcp, Roffset, R14_bcp); 1963 __ dispatch_next(vtos); 1964 } 1965 1966 // Table switch using binary search (value/offset pairs are ordered). 1967 // Bytecode stream format: 1968 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1969 // Note: Everything is big-endian format here. 
So on little endian machines, we have to byte-reverse the offset, the count, and the compare value.
1970 void TemplateTable::fast_binaryswitch() {
1971
1972 transition(itos, vtos);
1973 // Implementation using the following core algorithm: (copied from Intel)
1974 //
1975 // int binary_search(int key, LookupswitchPair* array, int n) {
1976 //   // Binary search according to "Methodik des Programmierens" by
1977 //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1978 //   int i = 0;
1979 //   int j = n;
1980 //   while (i+1 < j) {
1981 //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1982 //     // with Q: for all i: 0 <= i < n: key < a[i]
1983 //     // where a stands for the array and assuming that the (nonexistent)
1984 //     // element a[n] is infinitely big.
1985 //     int h = (i + j) >> 1;
1986 //     // i < h < j
1987 //     if (key < array[h].fast_match()) {
1988 //       j = h;
1989 //     } else {
1990 //       i = h;
1991 //     }
1992 //   }
1993 //   // R: a[i] <= key < a[i+1] or Q
1994 //   // (i.e., if key is within array, i is the correct index)
1995 //   return i;
1996 // }
1997
1998 // register allocation
1999 const Register Rkey = R17_tos; // already set (tosca)
2000 const Register Rarray = R3_ARG1;
2001 const Register Ri = R4_ARG2;
2002 const Register Rj = R5_ARG3;
2003 const Register Rh = R6_ARG4;
2004 const Register Rscratch = R11_scratch1;
2005
2006 const int log_entry_size = 3;
2007 const int entry_size = 1 << log_entry_size;
2008
2009 Label found;
2010
2011 // Find array start.
2012 __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
2013 __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
2014
2015 // initialize i & j
2016 __ li(Ri, 0);
2017 __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
2018
2019 // and start.
2020 Label entry;
2021 __ b(entry);
2022
2023 // binary search loop
2024 { Label loop;
2025 __ bind(loop);
2026 // int h = (i + j) >> 1;
2027 __ srdi(Rh, Rh, 1);
2028 // if (key < array[h].fast_match()) {
2029 //   j = h;
2030 // } else {
2031 //   i = h;
2032 // }
2033 __ sldi(Rscratch, Rh, log_entry_size);
2034 #if defined(VM_LITTLE_ENDIAN)
2035 __ lwbrx(Rscratch, Rscratch, Rarray);
2036 #else
2037 __ lwzx(Rscratch, Rscratch, Rarray);
2038 #endif
2039
2040 // if (key < current value)
2041 //   Rj = Rh
2042 // else
2043 //   Ri = Rh
2044 Label Lgreater;
2045 __ cmpw(CCR0, Rkey, Rscratch);
2046 __ bge(CCR0, Lgreater);
2047 __ mr(Rj, Rh);
2048 __ b(entry);
2049 __ bind(Lgreater);
2050 __ mr(Ri, Rh);
2051
2052 // while (i+1 < j)
2053 __ bind(entry);
2054 __ addi(Rscratch, Ri, 1);
2055 __ cmpw(CCR0, Rscratch, Rj);
2056 __ add(Rh, Ri, Rj); // start h = i + j (shifted right by 1 at the loop head)
2057
2058 __ blt(CCR0, loop);
2059 }
2060
2061 // End of binary search, result index is i (must check again!).
2062 Label default_case;
2063 Label continue_execution;
2064 if (ProfileInterpreter) {
2065 __ mr(Rh, Ri); // Save index in i for profiling.
2066 } 2067 // Ri = value offset 2068 __ sldi(Ri, Ri, log_entry_size); 2069 __ add(Ri, Ri, Rarray); 2070 __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned); 2071 2072 Label not_found; 2073 // Ri = offset offset 2074 __ cmpw(CCR0, Rkey, Rscratch); 2075 __ beq(CCR0, not_found); 2076 // entry not found -> j = default offset 2077 __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned); 2078 __ b(default_case); 2079 2080 __ bind(not_found); 2081 // entry found -> j = offset 2082 __ profile_switch_case(Rh, Rj, Rscratch, Rkey); 2083 __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned); 2084 2085 if (ProfileInterpreter) { 2086 __ b(continue_execution); 2087 } 2088 2089 __ bind(default_case); // fall through (if not profiling) 2090 __ profile_switch_default(Ri, Rscratch); 2091 2092 __ bind(continue_execution); 2093 2094 __ extsw(Rj, Rj); 2095 __ add(R14_bcp, Rj, R14_bcp); 2096 __ dispatch_next(vtos); 2097 } 2098 2099 void TemplateTable::_return(TosState state) { 2100 transition(state, state); 2101 assert(_desc->calls_vm(), 2102 "inconsistent calls_vm information"); // call in remove_activation 2103 2104 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { 2105 2106 Register Rscratch = R11_scratch1, 2107 Rklass = R12_scratch2, 2108 Rklass_flags = Rklass; 2109 Label Lskip_register_finalizer; 2110 2111 // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case. 2112 assert(state == vtos, "only valid state"); 2113 __ ld(R17_tos, 0, R18_locals); 2114 2115 // Load klass of this obj. 2116 __ load_klass(Rklass, R17_tos); 2117 __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass); 2118 __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER)); 2119 __ bfalse(CCR0, Lskip_register_finalizer); 2120 2121 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */); 2122 2123 __ align(32, 12); 2124 __ bind(Lskip_register_finalizer); 2125 } 2126 2127 // Move the result value into the correct register and remove memory stack frame. 2128 __ remove_activation(state, /* throw_monitor_exception */ true); 2129 // Restoration of lr done by remove_activation. 2130 switch (state) { 2131 // Narrow result if state is itos but result type is smaller. 2132 // Need to narrow in the return bytecode rather than in generate_return_entry 2133 // since compiled code callers expect the result to already be narrowed. 2134 case itos: __ narrow(R17_tos); /* fall through */ 2135 case ltos: 2136 case atos: __ mr(R3_RET, R17_tos); break; 2137 case ftos: 2138 case dtos: __ fmr(F1_RET, F15_ftos); break; 2139 case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need 2140 // to get visible before the reference to the object gets stored anywhere. 2141 __ membar(Assembler::StoreStore); break; 2142 default : ShouldNotReachHere(); 2143 } 2144 __ blr(); 2145 } 2146 2147 // ============================================================================ 2148 // Constant pool cache access 2149 // 2150 // Memory ordering: 2151 // 2152 // Like done in C++ interpreter, we load the fields 2153 // - _indices 2154 // - _f12_oop 2155 // acquired, because these are asked if the cache is already resolved. We don't 2156 // want to float loads above this check. 
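// The acquire is implemented with the PPC cmp-br-isync idiom instead of a
// plain lwsync; schematically (this is what resolve_cache_and_index below
// emits, shown here only as a sketch):
//
//   lbz   Rscratch, <indices byte>(Rcache)  // load the resolved-bytecode byte
//   cmpdi CCR0, Rscratch, code
//   beq   CCR0, Lresolved                   // branch consumes the loaded value
//   ... call out to resolve ...
//   Lresolved:
//   isync                                   // no younger load may pass this point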
2157 // See also comments in ConstantPoolCacheEntry::bytecode_1(), 2158 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1(); 2159 2160 // Call into the VM if call site is not yet resolved 2161 // 2162 // Input regs: 2163 // - None, all passed regs are outputs. 2164 // 2165 // Returns: 2166 // - Rcache: The const pool cache entry that contains the resolved result. 2167 // - Rresult: Either noreg or output for f1/f2. 2168 // 2169 // Kills: 2170 // - Rscratch 2171 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) { 2172 2173 __ get_cache_and_index_at_bcp(Rcache, 1, index_size); 2174 Label Lresolved, Ldone; 2175 2176 Bytecodes::Code code = bytecode(); 2177 switch (code) { 2178 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; 2179 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; 2180 } 2181 2182 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); 2183 // We are resolved if the indices offset contains the current bytecode. 2184 #if defined(VM_LITTLE_ENDIAN) 2185 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache); 2186 #else 2187 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache); 2188 #endif 2189 // Acquire by cmp-br-isync (see below). 2190 __ cmpdi(CCR0, Rscratch, (int)code); 2191 __ beq(CCR0, Lresolved); 2192 2193 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache); 2194 __ li(R4_ARG2, code); 2195 __ call_VM(noreg, entry, R4_ARG2, true); 2196 2197 // Update registers with resolved info. 2198 __ get_cache_and_index_at_bcp(Rcache, 1, index_size); 2199 __ b(Ldone); 2200 2201 __ bind(Lresolved); 2202 __ isync(); // Order load wrt. succeeding loads. 2203 __ bind(Ldone); 2204 } 2205 2206 // Load the constant pool cache entry at field accesses into registers. 2207 // The Rcache and Rindex registers must be set before call. 2208 // Input: 2209 // - Rcache, Rindex 2210 // Output: 2211 // - Robj, Roffset, Rflags 2212 void TemplateTable::load_field_cp_cache_entry(Register Robj, 2213 Register Rcache, 2214 Register Rindex /* unused on PPC64 */, 2215 Register Roffset, 2216 Register Rflags, 2217 bool is_static = false) { 2218 assert_different_registers(Rcache, Rflags, Roffset); 2219 // assert(Rindex == noreg, "parameter not used on PPC64"); 2220 2221 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2222 __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); 2223 __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache); 2224 if (is_static) { 2225 __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache); 2226 __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj); 2227 // Acquire not needed here. Following access has an address dependency on this value. 2228 } 2229 } 2230 2231 // Load the constant pool cache entry at invokes into registers. 2232 // Resolve if necessary. 
2233
2234 // Input Registers:
2235 //   - None, bcp is used, though
2236 //
2237 // Return registers:
2238 //   - Rmethod       (f1 field or f2 if invokevirtual)
2239 //   - Ritable_index (f2 field)
2240 //   - Rflags        (flags field)
2241 //
2242 // Kills:
2243 //   - R21
2244 //
2245 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2246 Register Rmethod,
2247 Register Ritable_index,
2248 Register Rflags,
2249 bool is_invokevirtual,
2250 bool is_invokevfinal,
2251 bool is_invokedynamic) {
2252
2253 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2254 // Determine constant pool cache field offsets.
2255 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2256 const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
2257 const int flags_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
2258 // Access constant pool cache fields.
2259 const int index_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());
2260
2261 Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.
2262
2263 if (is_invokevfinal) {
2264 assert(Ritable_index == noreg, "register not used");
2265 // Already resolved.
2266 __ get_cache_and_index_at_bcp(Rcache, 1);
2267 } else {
2268 resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
2269 }
2270
2271 __ ld(Rmethod, method_offset, Rcache);
2272 __ ld(Rflags, flags_offset, Rcache);
2273
2274 if (Ritable_index != noreg) {
2275 __ ld(Ritable_index, index_offset, Rcache);
2276 }
2277 }
2278
2279 // ============================================================================
2280 // Field access
2281
2282 // Volatile variables demand their effects be made known to all CPUs
2283 // in order. Store buffers on most chips allow reads & writes to
2284 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2285 // without some kind of memory barrier (i.e., it's not sufficient that
2286 // the interpreter does not reorder volatile references, the hardware
2287 // also must not reorder them).
2288 //
2289 // According to the new Java Memory Model (JMM):
2290 // (1) All volatiles are serialized wrt each other. ALSO reads &
2291 // writes act as acquire & release, so:
2292 // (2) A read cannot let unrelated NON-volatile memory refs that
2293 // happen after the read float up to before the read. It's OK for
2294 // non-volatile memory refs that happen before the volatile read to
2295 // float down below it.
2296 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2297 // memory refs that happen BEFORE the write float down to after the
2298 // write. It's OK for non-volatile memory refs that happen after the
2299 // volatile write to float up before it.
2300 //
2301 // We only put in barriers around volatile refs (they are expensive),
2302 // not _between_ memory refs (that would require us to track the
2303 // flavor of the previous memory refs). Requirements (2) and (3)
2304 // require some barriers before volatile stores and after volatile
2305 // loads. These nearly cover requirement (1) but miss the
2306 // volatile-store-volatile-load case. This final case is placed after
2307 // volatile-stores although it could just as well go before
2308 // volatile-loads.
2309
2310 // The registers cache and index are expected to be set before the call.
2311 // Correct values of the cache and index registers are preserved.
2312 // Kills:
2313 //   Rcache (if has_tos)
2314 //   Rscratch
2315 void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
2316
2317 assert_different_registers(Rcache, Rscratch);
2318
2319 if (JvmtiExport::can_post_field_access()) {
2320 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2321 Label Lno_field_access_post;
2322
2323 // Check if post field access is enabled.
2324 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
2325 __ lwz(Rscratch, offs, Rscratch);
2326
2327 __ cmpwi(CCR0, Rscratch, 0);
2328 __ beq(CCR0, Lno_field_access_post);
2329
2330 // Post access enabled - do it!
2331 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2332 if (is_static) {
2333 __ li(R17_tos, 0);
2334 } else {
2335 if (has_tos) {
2336 // The fast bytecode versions have obj ptr in register.
2337 // Thus, save object pointer before call_VM() clobbers it:
2338 // put object on tos where GC wants it.
2339 __ push_ptr(R17_tos);
2340 } else {
2341 // Load top of stack (do not pop the value off the stack).
2342 __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
2343 }
2344 __ verify_oop(R17_tos);
2345 }
2346 // tos: object pointer or NULL if static
2347 // cache: cache entry pointer
2348 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
2349 if (!is_static && has_tos) {
2350 // Restore object pointer.
2351 __ pop_ptr(R17_tos);
2352 __ verify_oop(R17_tos);
2353 } else {
2354 // Cache is still needed to get class or obj.
2355 __ get_cache_and_index_at_bcp(Rcache, 1);
2356 }
2357
2358 __ align(32, 12);
2359 __ bind(Lno_field_access_post);
2360 }
2361 }
2362
2363 // kills R11_scratch1
2364 void TemplateTable::pop_and_check_object(Register Roop) {
2365 Register Rtmp = R11_scratch1;
2366
2367 assert_different_registers(Rtmp, Roop);
2368 __ pop_ptr(Roop);
2369 // For field access must check obj.
2370 __ null_check_throw(Roop, -1, Rtmp);
2371 __ verify_oop(Roop);
2372 }
2373
2374 // PPC64: implement volatile loads as fence-load-acquire.
2375 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2376 transition(vtos, vtos);
2377
2378 Label Lacquire, Lisync;
2379
2380 const Register Rcache = R3_ARG1,
2381 Rclass_or_obj = R22_tmp2,
2382 Roffset = R23_tmp3,
2383 Rflags = R31,
2384 Rbtable = R5_ARG3,
2385 Rbc = R6_ARG4,
2386 Rscratch = R12_scratch2;
2387
2388 static address field_branch_table[number_of_states],
2389 static_branch_table[number_of_states];
2390
2391 address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table;
2392
2393 // Get field offset.
2394 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2395
2396 // JVMTI support
2397 jvmti_post_field_access(Rcache, Rscratch, is_static, false);
2398
2399 // Load after possible GC.
2400 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2401
2402 // Load pointer to branch table.
2403 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2404
2405 // Get volatile flag.
2406 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2407 // Note: sync is needed before volatile load on PPC64.
2408
2409 // Check field type.
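// The type dispatch that follows works like this sketch (branch_table holds
// one generated entry point per tos state; the volatile flavor of an entry
// starts exactly one instruction earlier, at the leading fence):
//
//   address entry = branch_table[tos_state];
//   if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
//     entry -= BytesPerInstWord;  // enter at the fence
//   }
//   goto *entry;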
2410 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2411 2412 #ifdef ASSERT 2413 Label LFlagInvalid; 2414 __ cmpldi(CCR0, Rflags, number_of_states); 2415 __ bge(CCR0, LFlagInvalid); 2416 #endif 2417 2418 // Load from branch table and dispatch (volatile case: one instruction ahead). 2419 __ sldi(Rflags, Rflags, LogBytesPerWord); 2420 __ cmpwi(CCR6, Rscratch, 1); // Volatile? 2421 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2422 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0. 2423 } 2424 __ ldx(Rbtable, Rbtable, Rflags); 2425 2426 // Get the obj from stack. 2427 if (!is_static) { 2428 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2429 } else { 2430 __ verify_oop(Rclass_or_obj); 2431 } 2432 2433 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2434 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2435 } 2436 __ mtctr(Rbtable); 2437 __ bctr(); 2438 2439 #ifdef ASSERT 2440 __ bind(LFlagInvalid); 2441 __ stop("got invalid flag", 0x654); 2442 #endif 2443 2444 if (!is_static && rc == may_not_rewrite) { 2445 // We reuse the code from is_static. It's jumped to via the table above. 2446 return; 2447 } 2448 2449 #ifdef ASSERT 2450 // __ bind(Lvtos); 2451 address pc_before_fence = __ pc(); 2452 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2453 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2454 assert(branch_table[vtos] == 0, "can't compute twice"); 2455 branch_table[vtos] = __ pc(); // non-volatile_entry point 2456 __ stop("vtos unexpected", 0x655); 2457 #endif 2458 2459 __ align(32, 28, 28); // Align load. 2460 // __ bind(Ldtos); 2461 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2462 assert(branch_table[dtos] == 0, "can't compute twice"); 2463 branch_table[dtos] = __ pc(); // non-volatile_entry point 2464 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 2465 __ push(dtos); 2466 if (!is_static && rc == may_rewrite) { 2467 patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch); 2468 } 2469 { 2470 Label acquire_double; 2471 __ beq(CCR6, acquire_double); // Volatile? 2472 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2473 2474 __ bind(acquire_double); 2475 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2476 __ beq_predict_taken(CCR0, Lisync); 2477 __ b(Lisync); // In case of NAN. 2478 } 2479 2480 __ align(32, 28, 28); // Align load. 2481 // __ bind(Lftos); 2482 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2483 assert(branch_table[ftos] == 0, "can't compute twice"); 2484 branch_table[ftos] = __ pc(); // non-volatile_entry point 2485 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 2486 __ push(ftos); 2487 if (!is_static && rc == may_rewrite) { 2488 patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); 2489 } 2490 { 2491 Label acquire_float; 2492 __ beq(CCR6, acquire_float); // Volatile? 2493 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2494 2495 __ bind(acquire_float); 2496 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2497 __ beq_predict_taken(CCR0, Lisync); 2498 __ b(Lisync); // In case of NAN. 2499 } 2500 2501 __ align(32, 28, 28); // Align load. 2502 // __ bind(Litos); 2503 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 
2504 assert(branch_table[itos] == 0, "can't compute twice"); 2505 branch_table[itos] = __ pc(); // non-volatile_entry point 2506 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2507 __ push(itos); 2508 if (!is_static && rc == may_rewrite) { 2509 patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch); 2510 } 2511 __ beq(CCR6, Lacquire); // Volatile? 2512 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2513 2514 __ align(32, 28, 28); // Align load. 2515 // __ bind(Lltos); 2516 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2517 assert(branch_table[ltos] == 0, "can't compute twice"); 2518 branch_table[ltos] = __ pc(); // non-volatile_entry point 2519 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2520 __ push(ltos); 2521 if (!is_static && rc == may_rewrite) { 2522 patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch); 2523 } 2524 __ beq(CCR6, Lacquire); // Volatile? 2525 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2526 2527 __ align(32, 28, 28); // Align load. 2528 // __ bind(Lbtos); 2529 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2530 assert(branch_table[btos] == 0, "can't compute twice"); 2531 branch_table[btos] = __ pc(); // non-volatile_entry point 2532 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2533 __ extsb(R17_tos, R17_tos); 2534 __ push(btos); 2535 if (!is_static && rc == may_rewrite) { 2536 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2537 } 2538 __ beq(CCR6, Lacquire); // Volatile? 2539 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2540 2541 __ align(32, 28, 28); // Align load. 2542 // __ bind(Lztos); (same code as btos) 2543 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2544 assert(branch_table[ztos] == 0, "can't compute twice"); 2545 branch_table[ztos] = __ pc(); // non-volatile_entry point 2546 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2547 __ push(ztos); 2548 if (!is_static && rc == may_rewrite) { 2549 // use btos rewriting, no truncating to t/f bit is needed for getfield. 2550 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2551 } 2552 __ beq(CCR6, Lacquire); // Volatile? 2553 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2554 2555 __ align(32, 28, 28); // Align load. 2556 // __ bind(Lctos); 2557 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2558 assert(branch_table[ctos] == 0, "can't compute twice"); 2559 branch_table[ctos] = __ pc(); // non-volatile_entry point 2560 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 2561 __ push(ctos); 2562 if (!is_static && rc == may_rewrite) { 2563 patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch); 2564 } 2565 __ beq(CCR6, Lacquire); // Volatile? 2566 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2567 2568 __ align(32, 28, 28); // Align load. 2569 // __ bind(Lstos); 2570 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2571 assert(branch_table[stos] == 0, "can't compute twice"); 2572 branch_table[stos] = __ pc(); // non-volatile_entry point 2573 __ lhax(R17_tos, Rclass_or_obj, Roffset); 2574 __ push(stos); 2575 if (!is_static && rc == may_rewrite) { 2576 patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch); 2577 } 2578 __ beq(CCR6, Lacquire); // Volatile? 2579 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2580 2581 __ align(32, 28, 28); // Align load. 
2582 // __ bind(Latos);
2583 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2584 assert(branch_table[atos] == 0, "can't compute twice");
2585 branch_table[atos] = __ pc(); // non-volatile_entry point
2586 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2587 __ verify_oop(R17_tos);
2588 __ push(atos);
2589 //__ dcbt(R17_tos); // prefetch
2590 if (!is_static && rc == may_rewrite) {
2591 patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2592 }
2593 __ beq(CCR6, Lacquire); // Volatile?
2594 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2595
2596 __ align(32, 12);
2597 __ bind(Lacquire);
2598 __ twi_0(R17_tos);
2599 __ bind(Lisync);
2600 __ isync(); // acquire
2601
2602 #ifdef ASSERT
2603 for (int i = 0; i<number_of_states; ++i) {
2604 assert(branch_table[i], "get initialization");
2605 //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2606 //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2607 }
2608 #endif
2609 }
2610
2611 void TemplateTable::getfield(int byte_no) {
2612 getfield_or_static(byte_no, false);
2613 }
2614
2615 void TemplateTable::nofast_getfield(int byte_no) {
2616 getfield_or_static(byte_no, false, may_not_rewrite);
2617 }
2618
2619 void TemplateTable::getstatic(int byte_no) {
2620 getfield_or_static(byte_no, true);
2621 }
2622
2623 // The registers cache and index are expected to be set before the call.
2624 // The function may destroy various registers, just not the cache and index registers.
2625 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2626
2627 assert_different_registers(Rcache, Rscratch, R6_ARG4);
2628
2629 if (JvmtiExport::can_post_field_modification()) {
2630 Label Lno_field_mod_post;
2631
2632 // Check if post field modification is enabled.
2633 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2634 __ lwz(Rscratch, offs, Rscratch);
2635
2636 __ cmpwi(CCR0, Rscratch, 0);
2637 __ beq(CCR0, Lno_field_mod_post);
2638
2639 // Do the post
2640 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2641 const Register Robj = Rscratch;
2642
2643 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2644 if (is_static) {
2645 // Life is simple. Null out the object pointer.
2646 __ li(Robj, 0);
2647 } else {
2648 // In case of the fast versions, value lives in registers => put it back on tos.
2649 int offs = Interpreter::expr_offset_in_bytes(0);
2650 Register base = R15_esp;
2651 switch(bytecode()) {
2652 case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break;
2653 case Bytecodes::_fast_iputfield: // Fall through
2654 case Bytecodes::_fast_bputfield: // Fall through
2655 case Bytecodes::_fast_zputfield: // Fall through
2656 case Bytecodes::_fast_cputfield: // Fall through
2657 case Bytecodes::_fast_sputfield: __ push_i(); offs+= Interpreter::stackElementSize; break;
2658 case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break;
2659 case Bytecodes::_fast_fputfield: __ push_f(); offs+= Interpreter::stackElementSize; break;
2660 case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break;
2661 default: {
2662 offs = 0;
2663 base = Robj;
2664 const Register Rflags = Robj;
2665 Label is_one_slot;
2666 // Life is harder. The stack holds the value on top, followed by the
2667 // object.
We don't know the size of the value, though; it could be 2668 // one or two words depending on its type. As a result, we must find 2669 // the type to determine where the object is. 2670 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian 2671 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2672 2673 __ cmpwi(CCR0, Rflags, ltos); 2674 __ cmpwi(CCR1, Rflags, dtos); 2675 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1)); 2676 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); 2677 __ beq(CCR0, is_one_slot); 2678 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2)); 2679 __ bind(is_one_slot); 2680 break; 2681 } 2682 } 2683 __ ld(Robj, offs, base); 2684 __ verify_oop(Robj); 2685 } 2686 2687 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0)); 2688 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4); 2689 __ get_cache_and_index_at_bcp(Rcache, 1); 2690 2691 // In case of the fast versions, value lives in registers => put it back on tos. 2692 switch(bytecode()) { 2693 case Bytecodes::_fast_aputfield: __ pop_ptr(); break; 2694 case Bytecodes::_fast_iputfield: // Fall through 2695 case Bytecodes::_fast_bputfield: // Fall through 2696 case Bytecodes::_fast_zputfield: // Fall through 2697 case Bytecodes::_fast_cputfield: // Fall through 2698 case Bytecodes::_fast_sputfield: __ pop_i(); break; 2699 case Bytecodes::_fast_lputfield: __ pop_l(); break; 2700 case Bytecodes::_fast_fputfield: __ pop_f(); break; 2701 case Bytecodes::_fast_dputfield: __ pop_d(); break; 2702 default: break; // Nothin' to do. 2703 } 2704 2705 __ align(32, 12); 2706 __ bind(Lno_field_mod_post); 2707 } 2708 } 2709 2710 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release). 2711 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 2712 Label Lvolatile; 2713 2714 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2715 Rclass_or_obj = R31, // Needs to survive C call. 2716 Roffset = R22_tmp2, // Needs to survive C call. 2717 Rflags = R3_ARG1, 2718 Rbtable = R4_ARG2, 2719 Rscratch = R11_scratch1, 2720 Rscratch2 = R12_scratch2, 2721 Rscratch3 = R6_ARG4, 2722 Rbc = Rscratch3; 2723 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2724 2725 static address field_rw_branch_table[number_of_states], 2726 field_norw_branch_table[number_of_states], 2727 static_branch_table[number_of_states]; 2728 2729 address* branch_table = is_static ? static_branch_table : 2730 (rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table); 2731 2732 // Stack (grows up): 2733 // value 2734 // obj 2735 2736 // Load the field offset. 2737 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2738 jvmti_post_field_mod(Rcache, Rscratch, is_static); 2739 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2740 2741 // Load pointer to branch table. 2742 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2743 2744 // Get volatile flag. 2745 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2746 2747 // Check the field type. 
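// Store-side counterpart of the getfield dispatch above; in outline (a
// sketch, not the literal emitted sequence):
//
//   if (is_volatile) release();   // entry point is one instruction early
//   pop value; store to field;
//   if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
//     fence();                    // covers volatile-store / volatile-load
//   }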
2748 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2749 2750 #ifdef ASSERT 2751 Label LFlagInvalid; 2752 __ cmpldi(CCR0, Rflags, number_of_states); 2753 __ bge(CCR0, LFlagInvalid); 2754 #endif 2755 2756 // Load from branch table and dispatch (volatile case: one instruction ahead). 2757 __ sldi(Rflags, Rflags, LogBytesPerWord); 2758 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2759 __ cmpwi(CR_is_vol, Rscratch, 1); // Volatile? 2760 } 2761 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0. 2762 __ ldx(Rbtable, Rbtable, Rflags); 2763 2764 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2765 __ mtctr(Rbtable); 2766 __ bctr(); 2767 2768 #ifdef ASSERT 2769 __ bind(LFlagInvalid); 2770 __ stop("got invalid flag", 0x656); 2771 2772 // __ bind(Lvtos); 2773 address pc_before_release = __ pc(); 2774 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2775 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2776 assert(branch_table[vtos] == 0, "can't compute twice"); 2777 branch_table[vtos] = __ pc(); // non-volatile_entry point 2778 __ stop("vtos unexpected", 0x657); 2779 #endif 2780 2781 __ align(32, 28, 28); // Align pop. 2782 // __ bind(Ldtos); 2783 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2784 assert(branch_table[dtos] == 0, "can't compute twice"); 2785 branch_table[dtos] = __ pc(); // non-volatile_entry point 2786 __ pop(dtos); 2787 if (!is_static) { 2788 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2789 } 2790 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2791 if (!is_static && rc == may_rewrite) { 2792 patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); 2793 } 2794 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2795 __ beq(CR_is_vol, Lvolatile); // Volatile? 2796 } 2797 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2798 2799 __ align(32, 28, 28); // Align pop. 2800 // __ bind(Lftos); 2801 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2802 assert(branch_table[ftos] == 0, "can't compute twice"); 2803 branch_table[ftos] = __ pc(); // non-volatile_entry point 2804 __ pop(ftos); 2805 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2806 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2807 if (!is_static && rc == may_rewrite) { 2808 patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); 2809 } 2810 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2811 __ beq(CR_is_vol, Lvolatile); // Volatile? 2812 } 2813 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2814 2815 __ align(32, 28, 28); // Align pop. 2816 // __ bind(Litos); 2817 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2818 assert(branch_table[itos] == 0, "can't compute twice"); 2819 branch_table[itos] = __ pc(); // non-volatile_entry point 2820 __ pop(itos); 2821 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2822 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2823 if (!is_static && rc == may_rewrite) { 2824 patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); 2825 } 2826 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2827 __ beq(CR_is_vol, Lvolatile); // Volatile? 
2828 } 2829 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2830 2831 __ align(32, 28, 28); // Align pop. 2832 // __ bind(Lltos); 2833 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2834 assert(branch_table[ltos] == 0, "can't compute twice"); 2835 branch_table[ltos] = __ pc(); // non-volatile_entry point 2836 __ pop(ltos); 2837 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2838 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2839 if (!is_static && rc == may_rewrite) { 2840 patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); 2841 } 2842 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2843 __ beq(CR_is_vol, Lvolatile); // Volatile? 2844 } 2845 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2846 2847 __ align(32, 28, 28); // Align pop. 2848 // __ bind(Lbtos); 2849 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2850 assert(branch_table[btos] == 0, "can't compute twice"); 2851 branch_table[btos] = __ pc(); // non-volatile_entry point 2852 __ pop(btos); 2853 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2854 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2855 if (!is_static && rc == may_rewrite) { 2856 patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); 2857 } 2858 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2859 __ beq(CR_is_vol, Lvolatile); // Volatile? 2860 } 2861 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2862 2863 __ align(32, 28, 28); // Align pop. 2864 // __ bind(Lztos); 2865 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2866 assert(branch_table[ztos] == 0, "can't compute twice"); 2867 branch_table[ztos] = __ pc(); // non-volatile_entry point 2868 __ pop(ztos); 2869 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2870 __ andi(R17_tos, R17_tos, 0x1); 2871 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2872 if (!is_static && rc == may_rewrite) { 2873 patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no); 2874 } 2875 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2876 __ beq(CR_is_vol, Lvolatile); // Volatile? 2877 } 2878 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2879 2880 __ align(32, 28, 28); // Align pop. 2881 // __ bind(Lctos); 2882 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2883 assert(branch_table[ctos] == 0, "can't compute twice"); 2884 branch_table[ctos] = __ pc(); // non-volatile_entry point 2885 __ pop(ctos); 2886 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.. 2887 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2888 if (!is_static && rc == may_rewrite) { 2889 patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); 2890 } 2891 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2892 __ beq(CR_is_vol, Lvolatile); // Volatile? 2893 } 2894 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2895 2896 __ align(32, 28, 28); // Align pop. 2897 // __ bind(Lstos); 2898 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2899 assert(branch_table[stos] == 0, "can't compute twice"); 2900 branch_table[stos] = __ pc(); // non-volatile_entry point 2901 __ pop(stos); 2902 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 
2903 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2904 if (!is_static && rc == may_rewrite) { 2905 patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); 2906 } 2907 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2908 __ beq(CR_is_vol, Lvolatile); // Volatile? 2909 } 2910 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2911 2912 __ align(32, 28, 28); // Align pop. 2913 // __ bind(Latos); 2914 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2915 assert(branch_table[atos] == 0, "can't compute twice"); 2916 branch_table[atos] = __ pc(); // non-volatile_entry point 2917 __ pop(atos); 2918 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1 2919 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2920 if (!is_static && rc == may_rewrite) { 2921 patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); 2922 } 2923 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2924 __ beq(CR_is_vol, Lvolatile); // Volatile? 2925 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2926 2927 __ align(32, 12); 2928 __ bind(Lvolatile); 2929 __ fence(); 2930 } 2931 // fallthru: __ b(Lexit); 2932 2933 #ifdef ASSERT 2934 for (int i = 0; i<number_of_states; ++i) { 2935 assert(branch_table[i], "put initialization"); 2936 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 2937 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 2938 } 2939 #endif 2940 } 2941 2942 void TemplateTable::putfield(int byte_no) { 2943 putfield_or_static(byte_no, false); 2944 } 2945 2946 void TemplateTable::nofast_putfield(int byte_no) { 2947 putfield_or_static(byte_no, false, may_not_rewrite); 2948 } 2949 2950 void TemplateTable::putstatic(int byte_no) { 2951 putfield_or_static(byte_no, true); 2952 } 2953 2954 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job. 2955 void TemplateTable::jvmti_post_fast_field_mod() { 2956 __ should_not_reach_here(); 2957 } 2958 2959 void TemplateTable::fast_storefield(TosState state) { 2960 transition(state, vtos); 2961 2962 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2963 Rclass_or_obj = R31, // Needs to survive C call. 2964 Roffset = R22_tmp2, // Needs to survive C call. 2965 Rflags = R3_ARG1, 2966 Rscratch = R11_scratch1, 2967 Rscratch2 = R12_scratch2, 2968 Rscratch3 = R4_ARG2; 2969 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2970 2971 // Constant pool already resolved => Load flags and offset of field. 2972 __ get_cache_and_index_at_bcp(Rcache, 1); 2973 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */); 2974 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2975 2976 // Get the obj and the final store addr. 2977 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2978 2979 // Get volatile flag. 2980 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2981 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); } 2982 { 2983 Label LnotVolatile; 2984 __ beq(CCR0, LnotVolatile); 2985 __ release(); 2986 __ align(32, 12); 2987 __ bind(LnotVolatile); 2988 } 2989 2990 // Do the store and fencing. 
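// The (already rewritten) _fast_ bytecode selects the store width; in outline:
//
//   switch (bytecode()) {
//     case _fast_aputfield: do_oop_store(...);           // includes GC barrier
//     case _fast_iputfield: *(jint* )(obj + off) = tos;  // stwx
//     case _fast_lputfield: *(jlong*)(obj + off) = tos;  // stdx
//     ... z/b, c/s, f, d analogously ...
//   }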
2991 switch(bytecode()) { 2992 case Bytecodes::_fast_aputfield: 2993 // Store into the field. 2994 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2995 break; 2996 2997 case Bytecodes::_fast_iputfield: 2998 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2999 break; 3000 3001 case Bytecodes::_fast_lputfield: 3002 __ stdx(R17_tos, Rclass_or_obj, Roffset); 3003 break; 3004 3005 case Bytecodes::_fast_zputfield: 3006 __ andi(R17_tos, R17_tos, 0x1); // boolean is true if LSB is 1 3007 // fall through to bputfield 3008 case Bytecodes::_fast_bputfield: 3009 __ stbx(R17_tos, Rclass_or_obj, Roffset); 3010 break; 3011 3012 case Bytecodes::_fast_cputfield: 3013 case Bytecodes::_fast_sputfield: 3014 __ sthx(R17_tos, Rclass_or_obj, Roffset); 3015 break; 3016 3017 case Bytecodes::_fast_fputfield: 3018 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 3019 break; 3020 3021 case Bytecodes::_fast_dputfield: 3022 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 3023 break; 3024 3025 default: ShouldNotReachHere(); 3026 } 3027 3028 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3029 Label LVolatile; 3030 __ beq(CR_is_vol, LVolatile); 3031 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3032 3033 __ align(32, 12); 3034 __ bind(LVolatile); 3035 __ fence(); 3036 } 3037 } 3038 3039 void TemplateTable::fast_accessfield(TosState state) { 3040 transition(atos, state); 3041 3042 Label LisVolatile; 3043 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3044 3045 const Register Rcache = R3_ARG1, 3046 Rclass_or_obj = R17_tos, 3047 Roffset = R22_tmp2, 3048 Rflags = R23_tmp3, 3049 Rscratch = R12_scratch2; 3050 3051 // Constant pool already resolved. Get the field offset. 3052 __ get_cache_and_index_at_bcp(Rcache, 1); 3053 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3054 3055 // JVMTI support 3056 jvmti_post_field_access(Rcache, Rscratch, false, true); 3057 3058 // Get the load address. 3059 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3060 3061 // Get volatile flag. 3062 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 
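// (rldicl_ is the record form: it sets CR0 from the extracted bit, which the
// bne below consumes.)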
3063 __ bne(CCR0, LisVolatile); 3064 3065 switch(bytecode()) { 3066 case Bytecodes::_fast_agetfield: 3067 { 3068 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3069 __ verify_oop(R17_tos); 3070 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3071 3072 __ bind(LisVolatile); 3073 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3074 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3075 __ verify_oop(R17_tos); 3076 __ twi_0(R17_tos); 3077 __ isync(); 3078 break; 3079 } 3080 case Bytecodes::_fast_igetfield: 3081 { 3082 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3083 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3084 3085 __ bind(LisVolatile); 3086 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3087 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3088 __ twi_0(R17_tos); 3089 __ isync(); 3090 break; 3091 } 3092 case Bytecodes::_fast_lgetfield: 3093 { 3094 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3095 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3096 3097 __ bind(LisVolatile); 3098 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3099 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3100 __ twi_0(R17_tos); 3101 __ isync(); 3102 break; 3103 } 3104 case Bytecodes::_fast_bgetfield: 3105 { 3106 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3107 __ extsb(R17_tos, R17_tos); 3108 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3109 3110 __ bind(LisVolatile); 3111 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3112 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3113 __ twi_0(R17_tos); 3114 __ extsb(R17_tos, R17_tos); 3115 __ isync(); 3116 break; 3117 } 3118 case Bytecodes::_fast_cgetfield: 3119 { 3120 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3121 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3122 3123 __ bind(LisVolatile); 3124 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3125 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3126 __ twi_0(R17_tos); 3127 __ isync(); 3128 break; 3129 } 3130 case Bytecodes::_fast_sgetfield: 3131 { 3132 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3133 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3134 3135 __ bind(LisVolatile); 3136 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3137 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3138 __ twi_0(R17_tos); 3139 __ isync(); 3140 break; 3141 } 3142 case Bytecodes::_fast_fgetfield: 3143 { 3144 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3145 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3146 3147 __ bind(LisVolatile); 3148 Label Ldummy; 3149 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3150 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3151 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3152 __ bne_predict_not_taken(CCR0, Ldummy); 3153 __ bind(Ldummy); 3154 __ isync(); 3155 break; 3156 } 3157 case Bytecodes::_fast_dgetfield: 3158 { 3159 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3160 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3161 3162 __ bind(LisVolatile); 3163 Label Ldummy; 3164 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3165 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3166 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 
3167 __ bne_predict_not_taken(CCR0, Ldummy); 3168 __ bind(Ldummy); 3169 __ isync(); 3170 break; 3171 } 3172 default: ShouldNotReachHere(); 3173 } 3174 } 3175 3176 void TemplateTable::fast_xaccess(TosState state) { 3177 transition(vtos, state); 3178 3179 Label LisVolatile; 3180 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3181 const Register Rcache = R3_ARG1, 3182 Rclass_or_obj = R17_tos, 3183 Roffset = R22_tmp2, 3184 Rflags = R23_tmp3, 3185 Rscratch = R12_scratch2; 3186 3187 __ ld(Rclass_or_obj, 0, R18_locals); 3188 3189 // Constant pool already resolved. Get the field offset. 3190 __ get_cache_and_index_at_bcp(Rcache, 2); 3191 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3192 3193 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches. 3194 3195 // Needed to report exception at the correct bcp. 3196 __ addi(R14_bcp, R14_bcp, 1); 3197 3198 // Get the load address. 3199 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3200 3201 // Get volatile flag. 3202 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3203 __ bne(CCR0, LisVolatile); 3204 3205 switch(state) { 3206 case atos: 3207 { 3208 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3209 __ verify_oop(R17_tos); 3210 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3211 3212 __ bind(LisVolatile); 3213 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3214 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3215 __ verify_oop(R17_tos); 3216 __ twi_0(R17_tos); 3217 __ isync(); 3218 break; 3219 } 3220 case itos: 3221 { 3222 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3223 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3224 3225 __ bind(LisVolatile); 3226 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3227 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3228 __ twi_0(R17_tos); 3229 __ isync(); 3230 break; 3231 } 3232 case ftos: 3233 { 3234 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3235 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3236 3237 __ bind(LisVolatile); 3238 Label Ldummy; 3239 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3240 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3241 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3242 __ bne_predict_not_taken(CCR0, Ldummy); 3243 __ bind(Ldummy); 3244 __ isync(); 3245 break; 3246 } 3247 default: ShouldNotReachHere(); 3248 } 3249 __ addi(R14_bcp, R14_bcp, -1); 3250 } 3251 3252 // ============================================================================ 3253 // Calls 3254 3255 // Common code for invoke 3256 // 3257 // Input: 3258 // - byte_no 3259 // 3260 // Output: 3261 // - Rmethod: The method to invoke next. 3262 // - Rret_addr: The return address to return to. 3263 // - Rindex: MethodType (invokehandle) or CallSite obj (invokedynamic) 3264 // - Rrecv: Cache for "this" pointer, might be noreg if static call. 3265 // - Rflags: Method flags from const pool cache. 3266 // 3267 // Kills: 3268 // - Rscratch1 3269 // 3270 void TemplateTable::prepare_invoke(int byte_no, 3271 Register Rmethod, // linked method (or i-klass) 3272 Register Rret_addr,// return address 3273 Register Rindex, // itable index, MethodType, etc. 3274 Register Rrecv, // If caller wants to see it. 3275 Register Rflags, // If caller wants to test it. 
3276 Register Rscratch 3277 ) { 3278 // Determine flags. 3279 const Bytecodes::Code code = bytecode(); 3280 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 3281 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 3282 const bool is_invokehandle = code == Bytecodes::_invokehandle; 3283 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 3284 const bool is_invokespecial = code == Bytecodes::_invokespecial; 3285 const bool load_receiver = (Rrecv != noreg); 3286 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 3287 3288 assert_different_registers(Rmethod, Rindex, Rflags, Rscratch); 3289 assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch); 3290 assert_different_registers(Rret_addr, Rscratch); 3291 3292 load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic); 3293 3294 // Saving of SP done in call_from_interpreter. 3295 3296 // Maybe push "appendix" to arguments. 3297 if (is_invokedynamic || is_invokehandle) { 3298 Label Ldone; 3299 __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63); 3300 __ beq(CCR0, Ldone); 3301 // Push "appendix" (MethodType, CallSite, etc.). 3302 // This must be done before we get the receiver, 3303 // since the parameter_size includes it. 3304 __ load_resolved_reference_at_index(Rscratch, Rindex); 3305 __ verify_oop(Rscratch); 3306 __ push_ptr(Rscratch); 3307 __ bind(Ldone); 3308 } 3309 3310 // Load receiver if needed (after appendix is pushed so parameter size is correct). 3311 if (load_receiver) { 3312 const Register Rparam_count = Rscratch; 3313 __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask); 3314 __ load_receiver(Rparam_count, Rrecv); 3315 __ verify_oop(Rrecv); 3316 } 3317 3318 // Get return address. 3319 { 3320 Register Rtable_addr = Rscratch; 3321 Register Rret_type = Rret_addr; 3322 address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); 3323 3324 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3325 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3326 __ load_dispatch_table(Rtable_addr, (address*)table_addr); 3327 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3328 // Get return address. 3329 __ ldx(Rret_addr, Rtable_addr, Rret_type); 3330 } 3331 } 3332 3333 // Helper for virtual calls. Load target out of vtable and jump off! 3334 // Kills all passed registers. 3335 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) { 3336 3337 assert_different_registers(Rrecv_klass, Rtemp, Rret); 3338 const Register Rtarget_method = Rindex; 3339 3340 // Get target method & entry point. 3341 const int base = in_bytes(Klass::vtable_start_offset()); 3342 // Calc vtable addr scale the vtable index by 8. 3343 __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size_in_bytes())); 3344 // Load target. 3345 __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes()); 3346 __ ldx(Rtarget_method, Rindex, Rrecv_klass); 3347 // Argument and return type profiling. 3348 __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true); 3349 __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */); 3350 } 3351 3352 // Virtual or final call. 
// Virtual or final call. Final calls are rewritten on the fly to run through
// "fast_finalcall" next time.
void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);

  Register Rtable_addr = R11_scratch1,
           Rret_type = R12_scratch2,
           Rret_addr = R5_ARG3,
           Rflags = R22_tmp2,                  // Should survive C call.
           Rrecv = R3_ARG1,
           Rrecv_klass = Rrecv,
           Rvtableindex_or_method = R31,       // Should survive C call.
           Rnum_params = R4_ARG2,
           Rnew_bc = R6_ARG4;

  Label LnotFinal;

  load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  if (RewriteBytecodes && !UseSharedSpaces) {
    patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
  }
  invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);

  __ align(32, 12);
  __ bind(LnotFinal);
  // Load "this" pointer (receiver).
  __ rldicl(Rnum_params, Rflags, 64, 48);
  __ load_receiver(Rnum_params, Rrecv);
  __ verify_oop(Rrecv);

  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);
  __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
  __ load_klass(Rrecv_klass, Rrecv);
  __ verify_klass_ptr(Rrecv_klass);
  __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);

  generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f2_byte, "use this argument");
  Register Rflags = R22_tmp2,
           Rmethod = R31;
  load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
  invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
}
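// Illustration (hypothetical Java source): a call site such as
//
//   final class C { final int f() { return 1; } }
//   ... c.f() ...
//
// is compiled to invokevirtual, but because the resolved method is vfinal,
// invokevirtual above patches the bytecode to _fast_invokevfinal, so later
// executions go straight to invokevfinal_helper and skip the vfinal test
// and vtable dispatch path entirely.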
void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {

  assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);

  // Load receiver from stack slot.
  Register Rrecv = Rscratch2;
  Register Rnum_params = Rrecv;

  __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
  __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);

  // Get return address.
  Register Rtable_addr = Rscratch1,
           Rret_addr = Rflags,
           Rret_type = Rret_addr;
  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);

  // Load receiver and receiver NULL check.
  __ load_receiver(Rnum_params, Rrecv);
  __ null_check_throw(Rrecv, -1, Rscratch1);

  __ profile_final_call(Rrecv, Rscratch1);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);

  // Do the call.
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
}

void TemplateTable::invokespecial(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3,
           Rreceiver = R6_ARG4,
           Rmethod = R31;

  prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);

  // Receiver NULL check.
  __ null_check_throw(Rreceiver, -1, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokestatic(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3;

  prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
                                                  Register Rret,
                                                  Register Rflags,
                                                  Register Rindex,
                                                  Register Rtemp1,
                                                  Register Rtemp2) {

  assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
  Label LnotFinal;

  // Check for vfinal.
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  Register Rscratch = Rflags; // Rflags is dead now.

  // Final call case.
  __ profile_final_call(Rtemp1, Rscratch);
  // Argument and return type profiling.
  __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
  // Do the final call - the index (f2) contains the method.
  __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);

  // Non-final call case.
  __ bind(LnotFinal);
  __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
  generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
}
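// Background note: legal bytecode may use invokeinterface for a method
// declared in java.lang.Object (e.g. an invokeinterface of hashCode on an
// interface type). javac does not emit this form, but other compliant
// compilers may. During linking such an entry is marked "forced virtual",
// and invokeinterface below routes it to invokeinterface_object_method
// above, which dispatches through the vtable instead of an itable.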
void TemplateTable::invokeinterface(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  const Register Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rscratch3 = R9_ARG7,
                 Rscratch4 = R10_ARG8,
                 Rtable_addr = Rscratch2,
                 Rinterface_klass = R5_ARG3,
                 Rret_type = R8_ARG6,
                 Rret_addr = Rret_type,
                 Rindex = R6_ARG4,
                 Rreceiver = R4_ARG2,
                 Rrecv_klass = Rreceiver,
                 Rflags = R7_ARG5;

  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);

  // Get receiver klass.
  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
  __ load_klass(Rrecv_klass, Rreceiver);

  // Check corner case: object method.
  Label LobjectMethod;

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  __ btrue(CCR0, LobjectMethod);

  // Fallthrough: The normal invokeinterface case.
  __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);

  // Find entry point to call.
  Label Lthrow_icc, Lthrow_ame;
  // Result will be returned in Rindex.
  __ mr(Rscratch4, Rrecv_klass);
  __ mr(Rscratch3, Rindex);
  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);

  __ cmpdi(CCR0, Rindex, 0);
  __ beq(CCR0, Lthrow_ame);
  // Found entry. Jump off!
  // Argument and return type profiling.
  __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
  __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);

  // Vtable entry was NULL => Throw abstract method error.
  __ bind(Lthrow_ame);
  __ mr(Rrecv_klass, Rscratch4);
  __ mr(Rindex, Rscratch3);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  // Interface was not found => Throw incompatible class change error.
  __ bind(Lthrow_icc);
  __ mr(Rrecv_klass, Rscratch4);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));

  __ should_not_reach_here();

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
  // The invokeinterface was rewritten to an invokevirtual, hence we have
  // to handle this corner case. This code isn't produced by javac, but could
  // be produced by another compliant java compiler.
  __ bind(LobjectMethod);
  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags = R4_ARG2,
                 Rmethod = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);

  // Profile this call.
  __ profile_call(Rscratch1, Rscratch2);

  // Off we go. With the new method handles, we don't jump to a method handle
  // entry any more. Instead, we push an "appendix" in prepare_invoke, which
  // happens to be the CallSite object the bootstrap method returned. This is
  // passed to a "link" method which does the dispatch (most likely just grabs
  // the MH stored inside the CallSite and does an invokehandle).
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}
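// Note on the "appendix" mentioned above: for invokedynamic (and
// invokehandle), prepare_invoke pushes one extra, synthetic argument that
// was materialized at link time and stored in the resolved references
// array. Conceptually the call becomes
//
//   linker(arg0, ..., argN, appendix)
//
// where the linker adapter uses the appendix (CallSite or MethodType) to
// reach the actual target. This is why the appendix is pushed before the
// receiver is located: parameter_size already accounts for it.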
void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags = R4_ARG2,
                 Rrecv = R5_ARG3,
                 Rmethod = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
  __ verify_method_ptr(Rmethod);
  __ null_check_throw(Rrecv, -1, Rscratch2);

  __ profile_final_call(Rrecv, Rscratch1);

  // Still no call from handle => We call the method handle interpreter here.
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}
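// Background note: _invokehandle is not produced by javac; an invokevirtual
// of a signature-polymorphic method (e.g. MethodHandle.invokeExact) is
// rewritten to it during linking. The receiver is the MethodHandle itself,
// which is why a final-call profile is recorded above: the linked adapter
// target is fixed for this call site.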
// =============================================================================
// Allocation

// Puts allocated obj ref onto the expression stack.
void TemplateTable::_new() {
  transition(vtos, atos);

  Label Lslow_case,
        Ldone,
        Linitialize_header,
        Lallocate_shared,
        Linitialize_object; // Including clearing the fields.

  const Register RallocatedObject = R17_tos,
                 RinstanceKlass = R9_ARG7,
                 Rscratch = R11_scratch1,
                 Roffset = R8_ARG6,
                 Rinstance_size = Roffset,
                 Rcpool = R4_ARG2,
                 Rtags = R3_ARG1,
                 Rindex = R5_ARG3;

  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();

  // --------------------------------------------------------------------------
  // Check if fast case is possible.

  // Load pointers to const pool and const pool's tags array.
  __ get_cpool_and_tags(Rcpool, Rtags);
  // Load index of constant pool entry.
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  if (UseTLAB) {
    // Make sure the class we're about to instantiate has been resolved.
    // This is done before loading the InstanceKlass to be consistent with the
    // order in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
    __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
    __ lbzx(Rtags, Rindex, Rtags);

    __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
    __ bne(CCR0, Lslow_case);

    // Get InstanceKlass.
    __ sldi(Roffset, Rindex, LogBytesPerWord);
    __ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass);

    // Make sure klass is fully initialized and get instance_size.
    __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
    __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);

    __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
    // Make sure klass has no finalizer, is not abstract, not an interface, and not java/lang/Class.
    __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?

    __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized?
    __ beq(CCR0, Lslow_case);

    // --------------------------------------------------------------------------
    // Fast case:
    // Allocate the instance.
    // 1) Try to allocate in the TLAB.
    // 2) If that fails, and the TLAB is not full enough to discard, allocate in the shared Eden.
    // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).

    Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
    Register RnewTopValue = R6_ARG4;
    Register RendValue = R7_ARG5;

    // Check if we can allocate in the TLAB.
    __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
    __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);

    __ add(RnewTopValue, Rinstance_size, RoldTopValue);

    // If there is enough space, we do not CAS and do not clear.
    __ cmpld(CCR0, RnewTopValue, RendValue);
    __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);

    __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);

    if (ZeroTLAB) {
      // The fields have already been cleared.
      __ b(Linitialize_header);
    } else {
      // Initialize both the header and fields.
      __ b(Linitialize_object);
    }

    // Fall through: TLAB was too small.
    if (allow_shared_alloc) {
      Register RtlabWasteLimitValue = R10_ARG8;
      Register RfreeValue = RnewTopValue;

      __ bind(Lallocate_shared);
      // Check if tlab should be discarded (refill_waste_limit >= free).
      __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
      __ subf(RfreeValue, RoldTopValue, RendValue);
      __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
      __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
      __ bge(CCR0, Lslow_case);

      // Increment waste limit to prevent getting stuck on this slow path.
      __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
      __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
    }
    // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
  }
  // else: Always go the slow path.

  // --------------------------------------------------------------------------
  // slow case
  __ bind(Lslow_case);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);

  if (UseTLAB) {
    __ b(Ldone);
    // --------------------------------------------------------------------------
    // Init1: Zero out newly allocated memory.

    if (!ZeroTLAB || allow_shared_alloc) {
      // Clear object fields.
      __ bind(Linitialize_object);

      // Initialize remaining object fields.
      Register Rbase = Rtags;
      __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
      __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
      __ srdi(Rinstance_size, Rinstance_size, 3);

      // Clear out object skipping header. Also takes care of the zero-length case.
      __ clear_memory_doubleword(Rbase, Rinstance_size);
      // fallthru: __ b(Linitialize_header);
    }

    // --------------------------------------------------------------------------
    // Init2: Initialize the header: mark, klass
    __ bind(Linitialize_header);

    // Init mark.
    if (UseBiasedLocking) {
      __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
    } else {
      __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
    }
    __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);

    // Init klass.
    __ store_klass_gap(RallocatedObject);
    __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)

    // Check and trigger dtrace event.
    {
      SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
      __ push(atos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
      __ pop(atos);
    }
  }

  // continue
  __ bind(Ldone);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}
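// The TLAB fast path above, as a sketch (everything is kept in registers in
// the generated code):
//
//   new_top = tlab_top + instance_size;
//   if (new_top <= tlab_end) { tlab_top = new_top; obj = old_top; goto init; }
//   if (refill_waste_limit >= tlab_free) goto slow;  // TLAB may be discarded/refilled
//   refill_waste_limit += increment; goto slow;      // keep the TLAB for now
//
// Note that in this version both miss paths end in the InterpreterRuntime::_new
// call; the waste-limit bookkeeping only influences whether the runtime may
// discard and refill the TLAB.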
void TemplateTable::newarray() {
  transition(itos, atos);

  __ lbz(R4, 1, R14_bcp);
  __ extsw(R5, R17_tos);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  __ get_constant_pool(R4);
  __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
  __ extsw(R6, R17_tos); // size
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

// Allocate a multi dimensional array.
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register Rptr = R31; // Needs to survive C call.

  // Compute ndims * wordSize; kept in Rptr so the dimensions can be popped afterwards.
  __ lbz(Rptr, 3, R14_bcp);
  __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
  // Esp points past last_dim, so set R4 to the first_dim address.
  __ add(R4, Rptr, R15_esp);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
  // Pop all dimensions off the stack.
  __ add(R15_esp, Rptr, R15_esp);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);

  __ verify_oop(R17_tos);
  __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
  __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
}
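// Why every allocation path above ends in a StoreStore barrier: without it,
// the store that makes the new object reachable to other threads (e.g. a
// subsequent putfield or aastore of R17_tos) could be reordered ahead of the
// header and field initialization stores, letting another thread observe a
// half-initialized object through a data race.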
// ============================================================================
// Typechecks

void TemplateTable::checkcast() {
  transition(atos, atos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset = R6_ARG4,
           RobjKlass = R4_ARG2,
           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
           Rcpool = R11_scratch1,
           Rtags = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" checkcast.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();  // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);

  // Do the checkcast.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone on success, falls through on failure.
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);

  // Not a subtype, so must throw an exception.
  // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}

// Output:
//   - tos == 0: Obj was null or not an instance of class.
//   - tos == 1: Obj was an instance of class.
void TemplateTable::instanceof() {
  transition(atos, itos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset = R6_ARG4,
           RobjKlass = R4_ARG2,
           RspecifiedKlass = R5_ARG3,
           Rcpool = R11_scratch1,
           Rtags = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" instanceof.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();  // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);

  // Do the subtype check.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone with tos == 1 on success,
  // falls through and sets tos == 0 on failure.
  __ li(R17_tos, 1);
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
  __ li(R17_tos, 0);

  if (ProfileInterpreter) {
    __ b(Ldone);
  }

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}
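// Note on the shared "quickening" dance in checkcast and instanceof above:
// on the first execution the constant pool tag is not yet JVM_CONSTANT_Class,
// so the VM is called (InterpreterRuntime::quicken_io_cc) to resolve the
// class, returning it via vm_result_2. Once the tag is set, all later
// executions take the Lquicked path and read the resolved klass directly
// from the constant pool entry, with no runtime call.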
// =============================================================================
// Breakpoints

void TemplateTable::_breakpoint() {
  transition(vtos, vtos);

  // Get the unpatched byte code.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
  __ mr(R31, R3_RET);

  // Post the breakpoint event.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);

  // Complete the execution of the original bytecode.
  __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
}

// =============================================================================
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // Exception oop is in tos.
  __ verify_oop(R17_tos);

  __ null_check_throw(R17_tos, -1, R11_scratch1);

  // The throw-exception interpreter entry expects the exception oop to be in R3.
  __ mr(R3_RET, R17_tos);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
  __ mtctr(R11_scratch1);
  __ bctr();
}
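// Note: athrow itself does not search for a handler or unwind; it only
// transfers control (via ctr) to the interpreter's shared throw_exception
// entry. That entry asks the runtime for the handler covering the current
// bcp and either dispatches to it or unwinds the frame and rethrows in the
// caller if none is found.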
// =============================================================================
// Synchronization
// Searches the basic object lock list on the stack for a free slot
// and uses it to lock the object in tos.
//
// Recursive locking is enabled by exiting the search if the same
// object is already found in the list. Thus, a new basic object lock
// is allocated "higher up" in the stack and is therefore found first
// at the next monitor exit.
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  __ verify_oop(R17_tos);

  Register Rcurrent_monitor = R11_scratch1,
           Rcurrent_obj = R12_scratch2,
           Robj_to_lock = R17_tos,
           Rscratch1 = R3_ARG1,
           Rscratch2 = R4_ARG2,
           Rscratch3 = R5_ARG3,
           Rcurrent_obj_addr = R6_ARG4;

  // ------------------------------------------------------------------------------
  // Null pointer exception.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until monitorenter returns true).

  // ------------------------------------------------------------------------------
  // Find a free slot in the monitor block.
  Label Lfound, Lexit, Lallocate_new;
  ConditionRegister found_free_slot = CCR0,
                    found_same_obj = CCR1,
                    reached_limit = CCR6;
  {
    Label Lloop;
    Register Rlimit = Rcurrent_monitor;

    // Set up search loop - start with topmost monitor.
    __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);

    __ ld(Rlimit, 0, R1_SP);
    __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base

    // Check if any slot is present => short cut to allocation if not.
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ bgt(reached_limit, Lallocate_new);

    // Pre-load topmost slot.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // The search loop.
    __ bind(Lloop);
    // Found free slot?
    __ cmpdi(found_free_slot, Rcurrent_obj, 0);
    // Is this entry for same obj? If so, stop the search and take the found
    // free slot or allocate a new one to enable recursive locking.
    __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ beq(found_free_slot, Lexit);
    __ beq(found_same_obj, Lallocate_new);
    __ bgt(reached_limit, Lallocate_new);
    // Check if last allocated BasicObjectLock reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ b(Lloop);
  }

  // ------------------------------------------------------------------------------
  // Check if we found a free slot.
  __ bind(Lexit);

  __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
  __ b(Lfound);

  // We didn't find a free BasicObjectLock => allocate one.
  __ align(32, 12);
  __ bind(Lallocate_new);
  __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
  __ mr(Rcurrent_monitor, R26_monitor);
  __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());

  // ------------------------------------------------------------------------------
  // We now have a slot to lock.
  __ bind(Lfound);

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ addi(R14_bcp, R14_bcp, 1);

  __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
  __ lock_object(Rcurrent_monitor, Robj_to_lock);

  // Check if there's enough space on the stack for the monitors after locking.
  // This emits a single store.
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}
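// Illustration (hypothetical Java source) of the recursive-locking rule above:
//
//   synchronized (o) { synchronized (o) { ... } }
//
// The inner monitorenter finds o already in the monitor list, exits the
// search (found_same_obj) and allocates a fresh BasicObjectLock slot for
// the same object. Because new slots sit "higher up" in the stack, the
// matching monitorexit finds and unlocks the innermost slot first.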
void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(R17_tos);

  Register Rcurrent_monitor = R11_scratch1,
           Rcurrent_obj = R12_scratch2,
           Robj_to_lock = R17_tos,
           Rcurrent_obj_addr = R3_ARG1,
           Rlimit = R4_ARG2;
  Label Lfound, Lillegal_monitor_state;

  // Check corner case: unbalanced monitorEnter / Exit.
  __ ld(Rlimit, 0, R1_SP);
  __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base

  // Null pointer check.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  __ cmpld(CCR0, R26_monitor, Rlimit);
  __ bgt(CCR0, Lillegal_monitor_state);

  // Find the corresponding slot in the monitors stack section.
  {
    Label Lloop;

    // Start with topmost monitor.
    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
    __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    __ bind(Lloop);
    // Is this entry for same obj?
    __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
    __ beq(CCR0, Lfound);

    // Check if last allocated BasicObjectLock reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ ble(CCR0, Lloop);
  }

  // Fell through without finding the basic object lock => throw IllegalMonitorStateException.
  __ bind(Lillegal_monitor_state);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  __ align(32, 12);
  __ bind(Lfound);
  __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
          -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ unlock_object(Rcurrent_monitor);
}

// ============================================================================
// Wide bytecodes

// Wide instructions. Simply redirects to the wide entry point for that instruction.
void TemplateTable::wide() {
  transition(vtos, vtos);

  const Register Rtable = R11_scratch1,
                 Rindex = R12_scratch2,
                 Rtmp = R0;

  __ lbz(Rindex, 1, R14_bcp);

  __ load_dispatch_table(Rtable, Interpreter::_wentry_point);

  __ slwi(Rindex, Rindex, LogBytesPerWord);
  __ ldx(Rtmp, Rtable, Rindex);
  __ mtctr(Rtmp);
  __ bctr();
  // Note: the bcp increment step is part of the individual wide bytecode implementations.
}
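// Illustration of the wide() dispatch above: for a bytecode stream such as
//
//   wide iinc <index16> <const16>
//
// the template fetches the sub-opcode (here iinc) at bcp + 1 and jumps to
// that bytecode's entry in Interpreter::_wentry_point. The wide entry then
// reads the 16-bit operands instead of the 8-bit ones and, as noted above,
// performs the bcp increment itself.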