1 /* 2 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. 3 * Copyright 2013, 2015 SAP AG. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "interpreter/interpreterRuntime.hpp" 30 #include "interpreter/interp_masm.hpp" 31 #include "interpreter/templateInterpreter.hpp" 32 #include "interpreter/templateTable.hpp" 33 #include "memory/universe.inline.hpp" 34 #include "oops/objArrayKlass.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "prims/methodHandles.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/stubRoutines.hpp" 39 #include "runtime/synchronizer.hpp" 40 #include "utilities/macros.hpp" 41 42 #ifndef CC_INTERP 43 44 #undef __ 45 #define __ _masm-> 46 47 // ============================================================================ 48 // Misc helpers 49 50 // Do an oop store like *(base + index) = val OR *(base + offset) = val 51 // (only one of both variants is possible at the same time). 52 // Index can be noreg. 53 // Kills: 54 // Rbase, Rtmp 55 static void do_oop_store(InterpreterMacroAssembler* _masm, 56 Register Rbase, 57 RegisterOrConstant offset, 58 Register Rval, // Noreg means always null. 59 Register Rtmp1, 60 Register Rtmp2, 61 Register Rtmp3, 62 BarrierSet::Name barrier, 63 bool precise, 64 bool check_null) { 65 assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase); 66 67 switch (barrier) { 68 #if INCLUDE_ALL_GCS 69 case BarrierSet::G1SATBCTLogging: 70 { 71 // Load and record the previous value. 72 __ g1_write_barrier_pre(Rbase, offset, 73 Rtmp3, /* holder of pre_val ? */ 74 Rtmp1, Rtmp2, false /* frame */); 75 76 Label Lnull, Ldone; 77 if (Rval != noreg) { 78 if (check_null) { 79 __ cmpdi(CCR0, Rval, 0); 80 __ beq(CCR0, Lnull); 81 } 82 __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1); 83 // Mark the card. 84 if (!(offset.is_constant() && offset.as_constant() == 0) && precise) { 85 __ add(Rbase, offset, Rbase); 86 } 87 __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone); 88 if (check_null) { __ b(Ldone); } 89 } 90 91 if (Rval == noreg || check_null) { // Store null oop. 92 Register Rnull = Rval; 93 __ bind(Lnull); 94 if (Rval == noreg) { 95 Rnull = Rtmp1; 96 __ li(Rnull, 0); 97 } 98 if (UseCompressedOops) { 99 __ stw(Rnull, offset, Rbase); 100 } else { 101 __ std(Rnull, offset, Rbase); 102 } 103 } 104 __ bind(Ldone); 105 } 106 break; 107 #endif // INCLUDE_ALL_GCS 108 case BarrierSet::CardTableModRef: 109 case BarrierSet::CardTableExtension: 110 { 111 Label Lnull, Ldone; 112 if (Rval != noreg) { 113 if (check_null) { 114 __ cmpdi(CCR0, Rval, 0); 115 __ beq(CCR0, Lnull); 116 } 117 __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should better stay uncompressed.*/ Rtmp1); 118 // Mark the card. 119 if (!(offset.is_constant() && offset.as_constant() == 0) && precise) { 120 __ add(Rbase, offset, Rbase); 121 } 122 __ card_write_barrier_post(Rbase, Rval, Rtmp1); 123 if (check_null) { 124 __ b(Ldone); 125 } 126 } 127 128 if (Rval == noreg || check_null) { // Store null oop. 129 Register Rnull = Rval; 130 __ bind(Lnull); 131 if (Rval == noreg) { 132 Rnull = Rtmp1; 133 __ li(Rnull, 0); 134 } 135 if (UseCompressedOops) { 136 __ stw(Rnull, offset, Rbase); 137 } else { 138 __ std(Rnull, offset, Rbase); 139 } 140 } 141 __ bind(Ldone); 142 } 143 break; 144 case BarrierSet::ModRef: 145 ShouldNotReachHere(); 146 break; 147 default: 148 ShouldNotReachHere(); 149 } 150 } 151 152 // ============================================================================ 153 // Platform-dependent initialization 154 155 void TemplateTable::pd_initialize() { 156 // No ppc64 specific initialization. 157 } 158 159 Address TemplateTable::at_bcp(int offset) { 160 // Not used on ppc. 161 ShouldNotReachHere(); 162 return Address(); 163 } 164 165 // Patches the current bytecode (ptr to it located in bcp) 166 // in the bytecode stream with a new one. 167 void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) { 168 // With sharing on, may need to test method flag. 169 if (!RewriteBytecodes) return; 170 Label L_patch_done; 171 172 switch (new_bc) { 173 case Bytecodes::_fast_aputfield: 174 case Bytecodes::_fast_bputfield: 175 case Bytecodes::_fast_cputfield: 176 case Bytecodes::_fast_dputfield: 177 case Bytecodes::_fast_fputfield: 178 case Bytecodes::_fast_iputfield: 179 case Bytecodes::_fast_lputfield: 180 case Bytecodes::_fast_sputfield: 181 { 182 // We skip bytecode quickening for putfield instructions when 183 // the put_code written to the constant pool cache is zero. 184 // This is required so that every execution of this instruction 185 // calls out to InterpreterRuntime::resolve_get_put to do 186 // additional, required work. 187 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); 188 assert(load_bc_into_bc_reg, "we use bc_reg as temp"); 189 __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1); 190 // ((*(cache+indices))>>((1+byte_no)*8))&0xFF: 191 #if defined(VM_LITTLE_ENDIAN) 192 __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp); 193 #else 194 __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp); 195 #endif 196 __ cmpwi(CCR0, Rnew_bc, 0); 197 __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc); 198 __ beq(CCR0, L_patch_done); 199 // __ isync(); // acquire not needed 200 break; 201 } 202 203 default: 204 assert(byte_no == -1, "sanity"); 205 if (load_bc_into_bc_reg) { 206 __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc); 207 } 208 } 209 210 if (JvmtiExport::can_post_breakpoint()) { 211 Label L_fast_patch; 212 __ lbz(Rtemp, 0, R14_bcp); 213 __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint); 214 __ bne(CCR0, L_fast_patch); 215 // Perform the quickening, slowly, in the bowels of the breakpoint table. 216 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc); 217 __ b(L_patch_done); 218 __ bind(L_fast_patch); 219 } 220 221 // Patch bytecode. 222 __ stb(Rnew_bc, 0, R14_bcp); 223 224 __ bind(L_patch_done); 225 } 226 227 // ============================================================================ 228 // Individual instructions 229 230 void TemplateTable::nop() { 231 transition(vtos, vtos); 232 // Nothing to do. 233 } 234 235 void TemplateTable::shouldnotreachhere() { 236 transition(vtos, vtos); 237 __ stop("shouldnotreachhere bytecode"); 238 } 239 240 void TemplateTable::aconst_null() { 241 transition(vtos, atos); 242 __ li(R17_tos, 0); 243 } 244 245 void TemplateTable::iconst(int value) { 246 transition(vtos, itos); 247 assert(value >= -1 && value <= 5, ""); 248 __ li(R17_tos, value); 249 } 250 251 void TemplateTable::lconst(int value) { 252 transition(vtos, ltos); 253 assert(value >= -1 && value <= 5, ""); 254 __ li(R17_tos, value); 255 } 256 257 void TemplateTable::fconst(int value) { 258 transition(vtos, ftos); 259 static float zero = 0.0; 260 static float one = 1.0; 261 static float two = 2.0; 262 switch (value) { 263 default: ShouldNotReachHere(); 264 case 0: { 265 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true); 266 __ lfs(F15_ftos, simm16_offset, R11_scratch1); 267 break; 268 } 269 case 1: { 270 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true); 271 __ lfs(F15_ftos, simm16_offset, R11_scratch1); 272 break; 273 } 274 case 2: { 275 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true); 276 __ lfs(F15_ftos, simm16_offset, R11_scratch1); 277 break; 278 } 279 } 280 } 281 282 void TemplateTable::dconst(int value) { 283 transition(vtos, dtos); 284 static double zero = 0.0; 285 static double one = 1.0; 286 switch (value) { 287 case 0: { 288 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true); 289 __ lfd(F15_ftos, simm16_offset, R11_scratch1); 290 break; 291 } 292 case 1: { 293 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true); 294 __ lfd(F15_ftos, simm16_offset, R11_scratch1); 295 break; 296 } 297 default: ShouldNotReachHere(); 298 } 299 } 300 301 void TemplateTable::bipush() { 302 transition(vtos, itos); 303 __ lbz(R17_tos, 1, R14_bcp); 304 __ extsb(R17_tos, R17_tos); 305 } 306 307 void TemplateTable::sipush() { 308 transition(vtos, itos); 309 __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed); 310 } 311 312 void TemplateTable::ldc(bool wide) { 313 Register Rscratch1 = R11_scratch1, 314 Rscratch2 = R12_scratch2, 315 Rcpool = R3_ARG1; 316 317 transition(vtos, vtos); 318 Label notInt, notClass, exit; 319 320 __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags. 321 if (wide) { // Read index. 322 __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned); 323 } else { 324 __ lbz(Rscratch1, 1, R14_bcp); 325 } 326 327 const int base_offset = ConstantPool::header_size() * wordSize; 328 const int tags_offset = Array<u1>::base_offset_in_bytes(); 329 330 // Get type from tags. 331 __ addi(Rscratch2, Rscratch2, tags_offset); 332 __ lbzx(Rscratch2, Rscratch2, Rscratch1); 333 334 __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class? 335 __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state? 336 __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal); 337 338 // Resolved class - need to call vm to get java mirror of the class. 339 __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class); 340 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above? 341 __ beq(CCR0, notClass); 342 343 __ li(R4, wide ? 1 : 0); 344 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4); 345 __ push(atos); 346 __ b(exit); 347 348 __ align(32, 12); 349 __ bind(notClass); 350 __ addi(Rcpool, Rcpool, base_offset); 351 __ sldi(Rscratch1, Rscratch1, LogBytesPerWord); 352 __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer); 353 __ bne(CCR0, notInt); 354 __ lwax(R17_tos, Rcpool, Rscratch1); 355 __ push(itos); 356 __ b(exit); 357 358 __ align(32, 12); 359 __ bind(notInt); 360 #ifdef ASSERT 361 // String and Object are rewritten to fast_aldc 362 __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float); 363 __ asm_assert_eq("unexpected type", 0x8765); 364 #endif 365 __ lfsx(F15_ftos, Rcpool, Rscratch1); 366 __ push(ftos); 367 368 __ align(32, 12); 369 __ bind(exit); 370 } 371 372 // Fast path for caching oop constants. 373 void TemplateTable::fast_aldc(bool wide) { 374 transition(vtos, atos); 375 376 int index_size = wide ? sizeof(u2) : sizeof(u1); 377 const Register Rscratch = R11_scratch1; 378 Label resolved; 379 380 // We are resolved if the resolved reference cache entry contains a 381 // non-null object (CallSite, etc.) 382 __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index. 383 __ load_resolved_reference_at_index(R17_tos, Rscratch); 384 __ cmpdi(CCR0, R17_tos, 0); 385 __ bne(CCR0, resolved); 386 __ load_const_optimized(R3_ARG1, (int)bytecode()); 387 388 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); 389 390 // First time invocation - must resolve first. 391 __ call_VM(R17_tos, entry, R3_ARG1); 392 393 __ align(32, 12); 394 __ bind(resolved); 395 __ verify_oop(R17_tos); 396 } 397 398 void TemplateTable::ldc2_w() { 399 transition(vtos, vtos); 400 Label Llong, Lexit; 401 402 Register Rindex = R11_scratch1, 403 Rcpool = R12_scratch2, 404 Rtag = R3_ARG1; 405 __ get_cpool_and_tags(Rcpool, Rtag); 406 __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned); 407 408 const int base_offset = ConstantPool::header_size() * wordSize; 409 const int tags_offset = Array<u1>::base_offset_in_bytes(); 410 // Get type from tags. 411 __ addi(Rcpool, Rcpool, base_offset); 412 __ addi(Rtag, Rtag, tags_offset); 413 414 __ lbzx(Rtag, Rtag, Rindex); 415 416 __ sldi(Rindex, Rindex, LogBytesPerWord); 417 __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double); 418 __ bne(CCR0, Llong); 419 // A double can be placed at word-aligned locations in the constant pool. 420 // Check out Conversions.java for an example. 421 // Also ConstantPool::header_size() is 20, which makes it very difficult 422 // to double-align double on the constant pool. SG, 11/7/97 423 __ lfdx(F15_ftos, Rcpool, Rindex); 424 __ push(dtos); 425 __ b(Lexit); 426 427 __ bind(Llong); 428 __ ldx(R17_tos, Rcpool, Rindex); 429 __ push(ltos); 430 431 __ bind(Lexit); 432 } 433 434 // Get the locals index located in the bytecode stream at bcp + offset. 435 void TemplateTable::locals_index(Register Rdst, int offset) { 436 __ lbz(Rdst, offset, R14_bcp); 437 } 438 439 void TemplateTable::iload() { 440 transition(vtos, itos); 441 442 // Get the local value into tos 443 const Register Rindex = R22_tmp2; 444 locals_index(Rindex); 445 446 // Rewrite iload,iload pair into fast_iload2 447 // iload,caload pair into fast_icaload 448 if (RewriteFrequentPairs) { 449 Label Lrewrite, Ldone; 450 Register Rnext_byte = R3_ARG1, 451 Rrewrite_to = R6_ARG4, 452 Rscratch = R11_scratch1; 453 454 // get next byte 455 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp); 456 457 // if _iload, wait to rewrite to iload2. We only want to rewrite the 458 // last two iloads in a pair. Comparing against fast_iload means that 459 // the next bytecode is neither an iload or a caload, and therefore 460 // an iload pair. 461 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload); 462 __ beq(CCR0, Ldone); 463 464 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload); 465 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2); 466 __ beq(CCR1, Lrewrite); 467 468 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload); 469 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload); 470 __ beq(CCR0, Lrewrite); 471 472 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload); 473 474 __ bind(Lrewrite); 475 patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false); 476 __ bind(Ldone); 477 } 478 479 __ load_local_int(R17_tos, Rindex, Rindex); 480 } 481 482 // Load 2 integers in a row without dispatching 483 void TemplateTable::fast_iload2() { 484 transition(vtos, itos); 485 486 __ lbz(R3_ARG1, 1, R14_bcp); 487 __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp); 488 489 __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1); 490 __ load_local_int(R17_tos, R12_scratch2, R17_tos); 491 __ push_i(R3_ARG1); 492 } 493 494 void TemplateTable::fast_iload() { 495 transition(vtos, itos); 496 // Get the local value into tos 497 498 const Register Rindex = R11_scratch1; 499 locals_index(Rindex); 500 __ load_local_int(R17_tos, Rindex, Rindex); 501 } 502 503 // Load a local variable type long from locals area to TOS cache register. 504 // Local index resides in bytecodestream. 505 void TemplateTable::lload() { 506 transition(vtos, ltos); 507 508 const Register Rindex = R11_scratch1; 509 locals_index(Rindex); 510 __ load_local_long(R17_tos, Rindex, Rindex); 511 } 512 513 void TemplateTable::fload() { 514 transition(vtos, ftos); 515 516 const Register Rindex = R11_scratch1; 517 locals_index(Rindex); 518 __ load_local_float(F15_ftos, Rindex, Rindex); 519 } 520 521 void TemplateTable::dload() { 522 transition(vtos, dtos); 523 524 const Register Rindex = R11_scratch1; 525 locals_index(Rindex); 526 __ load_local_double(F15_ftos, Rindex, Rindex); 527 } 528 529 void TemplateTable::aload() { 530 transition(vtos, atos); 531 532 const Register Rindex = R11_scratch1; 533 locals_index(Rindex); 534 __ load_local_ptr(R17_tos, Rindex, Rindex); 535 } 536 537 void TemplateTable::locals_index_wide(Register Rdst) { 538 // Offset is 2, not 1, because Lbcp points to wide prefix code. 539 __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned); 540 } 541 542 void TemplateTable::wide_iload() { 543 // Get the local value into tos. 544 545 const Register Rindex = R11_scratch1; 546 locals_index_wide(Rindex); 547 __ load_local_int(R17_tos, Rindex, Rindex); 548 } 549 550 void TemplateTable::wide_lload() { 551 transition(vtos, ltos); 552 553 const Register Rindex = R11_scratch1; 554 locals_index_wide(Rindex); 555 __ load_local_long(R17_tos, Rindex, Rindex); 556 } 557 558 void TemplateTable::wide_fload() { 559 transition(vtos, ftos); 560 561 const Register Rindex = R11_scratch1; 562 locals_index_wide(Rindex); 563 __ load_local_float(F15_ftos, Rindex, Rindex); 564 } 565 566 void TemplateTable::wide_dload() { 567 transition(vtos, dtos); 568 569 const Register Rindex = R11_scratch1; 570 locals_index_wide(Rindex); 571 __ load_local_double(F15_ftos, Rindex, Rindex); 572 } 573 574 void TemplateTable::wide_aload() { 575 transition(vtos, atos); 576 577 const Register Rindex = R11_scratch1; 578 locals_index_wide(Rindex); 579 __ load_local_ptr(R17_tos, Rindex, Rindex); 580 } 581 582 void TemplateTable::iaload() { 583 transition(itos, itos); 584 585 const Register Rload_addr = R3_ARG1, 586 Rarray = R4_ARG2, 587 Rtemp = R5_ARG3; 588 __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr); 589 __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr); 590 } 591 592 void TemplateTable::laload() { 593 transition(itos, ltos); 594 595 const Register Rload_addr = R3_ARG1, 596 Rarray = R4_ARG2, 597 Rtemp = R5_ARG3; 598 __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr); 599 __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr); 600 } 601 602 void TemplateTable::faload() { 603 transition(itos, ftos); 604 605 const Register Rload_addr = R3_ARG1, 606 Rarray = R4_ARG2, 607 Rtemp = R5_ARG3; 608 __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr); 609 __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr); 610 } 611 612 void TemplateTable::daload() { 613 transition(itos, dtos); 614 615 const Register Rload_addr = R3_ARG1, 616 Rarray = R4_ARG2, 617 Rtemp = R5_ARG3; 618 __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr); 619 __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr); 620 } 621 622 void TemplateTable::aaload() { 623 transition(itos, atos); 624 625 // tos: index 626 // result tos: array 627 const Register Rload_addr = R3_ARG1, 628 Rarray = R4_ARG2, 629 Rtemp = R5_ARG3; 630 __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr); 631 __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr); 632 __ verify_oop(R17_tos); 633 //__ dcbt(R17_tos); // prefetch 634 } 635 636 void TemplateTable::baload() { 637 transition(itos, itos); 638 639 const Register Rload_addr = R3_ARG1, 640 Rarray = R4_ARG2, 641 Rtemp = R5_ARG3; 642 __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr); 643 __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr); 644 __ extsb(R17_tos, R17_tos); 645 } 646 647 void TemplateTable::caload() { 648 transition(itos, itos); 649 650 const Register Rload_addr = R3_ARG1, 651 Rarray = R4_ARG2, 652 Rtemp = R5_ARG3; 653 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr); 654 __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr); 655 } 656 657 // Iload followed by caload frequent pair. 658 void TemplateTable::fast_icaload() { 659 transition(vtos, itos); 660 661 const Register Rload_addr = R3_ARG1, 662 Rarray = R4_ARG2, 663 Rtemp = R11_scratch1; 664 665 locals_index(R17_tos); 666 __ load_local_int(R17_tos, Rtemp, R17_tos); 667 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr); 668 __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr); 669 } 670 671 void TemplateTable::saload() { 672 transition(itos, itos); 673 674 const Register Rload_addr = R11_scratch1, 675 Rarray = R12_scratch2, 676 Rtemp = R3_ARG1; 677 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr); 678 __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr); 679 } 680 681 void TemplateTable::iload(int n) { 682 transition(vtos, itos); 683 684 __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals); 685 } 686 687 void TemplateTable::lload(int n) { 688 transition(vtos, ltos); 689 690 __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals); 691 } 692 693 void TemplateTable::fload(int n) { 694 transition(vtos, ftos); 695 696 __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals); 697 } 698 699 void TemplateTable::dload(int n) { 700 transition(vtos, dtos); 701 702 __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals); 703 } 704 705 void TemplateTable::aload(int n) { 706 transition(vtos, atos); 707 708 __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals); 709 } 710 711 void TemplateTable::aload_0() { 712 transition(vtos, atos); 713 // According to bytecode histograms, the pairs: 714 // 715 // _aload_0, _fast_igetfield 716 // _aload_0, _fast_agetfield 717 // _aload_0, _fast_fgetfield 718 // 719 // occur frequently. If RewriteFrequentPairs is set, the (slow) 720 // _aload_0 bytecode checks if the next bytecode is either 721 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then 722 // rewrites the current bytecode into a pair bytecode; otherwise it 723 // rewrites the current bytecode into _0 that doesn't do 724 // the pair check anymore. 725 // 726 // Note: If the next bytecode is _getfield, the rewrite must be 727 // delayed, otherwise we may miss an opportunity for a pair. 728 // 729 // Also rewrite frequent pairs 730 // aload_0, aload_1 731 // aload_0, iload_1 732 // These bytecodes with a small amount of code are most profitable 733 // to rewrite. 734 735 if (RewriteFrequentPairs) { 736 737 Label Lrewrite, Ldont_rewrite; 738 Register Rnext_byte = R3_ARG1, 739 Rrewrite_to = R6_ARG4, 740 Rscratch = R11_scratch1; 741 742 // Get next byte. 743 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp); 744 745 // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair. 746 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield); 747 __ beq(CCR0, Ldont_rewrite); 748 749 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield); 750 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0); 751 __ beq(CCR1, Lrewrite); 752 753 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield); 754 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0); 755 __ beq(CCR0, Lrewrite); 756 757 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield); 758 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0); 759 __ beq(CCR1, Lrewrite); 760 761 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0); 762 763 __ bind(Lrewrite); 764 patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false); 765 __ bind(Ldont_rewrite); 766 } 767 768 // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop). 769 aload(0); 770 } 771 772 void TemplateTable::istore() { 773 transition(itos, vtos); 774 775 const Register Rindex = R11_scratch1; 776 locals_index(Rindex); 777 __ store_local_int(R17_tos, Rindex); 778 } 779 780 void TemplateTable::lstore() { 781 transition(ltos, vtos); 782 const Register Rindex = R11_scratch1; 783 locals_index(Rindex); 784 __ store_local_long(R17_tos, Rindex); 785 } 786 787 void TemplateTable::fstore() { 788 transition(ftos, vtos); 789 790 const Register Rindex = R11_scratch1; 791 locals_index(Rindex); 792 __ store_local_float(F15_ftos, Rindex); 793 } 794 795 void TemplateTable::dstore() { 796 transition(dtos, vtos); 797 798 const Register Rindex = R11_scratch1; 799 locals_index(Rindex); 800 __ store_local_double(F15_ftos, Rindex); 801 } 802 803 void TemplateTable::astore() { 804 transition(vtos, vtos); 805 806 const Register Rindex = R11_scratch1; 807 __ pop_ptr(); 808 __ verify_oop_or_return_address(R17_tos, Rindex); 809 locals_index(Rindex); 810 __ store_local_ptr(R17_tos, Rindex); 811 } 812 813 void TemplateTable::wide_istore() { 814 transition(vtos, vtos); 815 816 const Register Rindex = R11_scratch1; 817 __ pop_i(); 818 locals_index_wide(Rindex); 819 __ store_local_int(R17_tos, Rindex); 820 } 821 822 void TemplateTable::wide_lstore() { 823 transition(vtos, vtos); 824 825 const Register Rindex = R11_scratch1; 826 __ pop_l(); 827 locals_index_wide(Rindex); 828 __ store_local_long(R17_tos, Rindex); 829 } 830 831 void TemplateTable::wide_fstore() { 832 transition(vtos, vtos); 833 834 const Register Rindex = R11_scratch1; 835 __ pop_f(); 836 locals_index_wide(Rindex); 837 __ store_local_float(F15_ftos, Rindex); 838 } 839 840 void TemplateTable::wide_dstore() { 841 transition(vtos, vtos); 842 843 const Register Rindex = R11_scratch1; 844 __ pop_d(); 845 locals_index_wide(Rindex); 846 __ store_local_double(F15_ftos, Rindex); 847 } 848 849 void TemplateTable::wide_astore() { 850 transition(vtos, vtos); 851 852 const Register Rindex = R11_scratch1; 853 __ pop_ptr(); 854 __ verify_oop_or_return_address(R17_tos, Rindex); 855 locals_index_wide(Rindex); 856 __ store_local_ptr(R17_tos, Rindex); 857 } 858 859 void TemplateTable::iastore() { 860 transition(itos, vtos); 861 862 const Register Rindex = R3_ARG1, 863 Rstore_addr = R4_ARG2, 864 Rarray = R5_ARG3, 865 Rtemp = R6_ARG4; 866 __ pop_i(Rindex); 867 __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr); 868 __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr); 869 } 870 871 void TemplateTable::lastore() { 872 transition(ltos, vtos); 873 874 const Register Rindex = R3_ARG1, 875 Rstore_addr = R4_ARG2, 876 Rarray = R5_ARG3, 877 Rtemp = R6_ARG4; 878 __ pop_i(Rindex); 879 __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr); 880 __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr); 881 } 882 883 void TemplateTable::fastore() { 884 transition(ftos, vtos); 885 886 const Register Rindex = R3_ARG1, 887 Rstore_addr = R4_ARG2, 888 Rarray = R5_ARG3, 889 Rtemp = R6_ARG4; 890 __ pop_i(Rindex); 891 __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr); 892 __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr); 893 } 894 895 void TemplateTable::dastore() { 896 transition(dtos, vtos); 897 898 const Register Rindex = R3_ARG1, 899 Rstore_addr = R4_ARG2, 900 Rarray = R5_ARG3, 901 Rtemp = R6_ARG4; 902 __ pop_i(Rindex); 903 __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr); 904 __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr); 905 } 906 907 // Pop 3 values from the stack and... 908 void TemplateTable::aastore() { 909 transition(vtos, vtos); 910 911 Label Lstore_ok, Lis_null, Ldone; 912 const Register Rindex = R3_ARG1, 913 Rarray = R4_ARG2, 914 Rscratch = R11_scratch1, 915 Rscratch2 = R12_scratch2, 916 Rarray_klass = R5_ARG3, 917 Rarray_element_klass = Rarray_klass, 918 Rvalue_klass = R6_ARG4, 919 Rstore_addr = R31; // Use register which survives VM call. 920 921 __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store. 922 __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index. 923 __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp); // Get array. 924 925 __ verify_oop(R17_tos); 926 __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr); 927 // Rindex is dead! 928 Register Rscratch3 = Rindex; 929 930 // Do array store check - check for NULL value first. 931 __ cmpdi(CCR0, R17_tos, 0); 932 __ beq(CCR0, Lis_null); 933 934 __ load_klass(Rarray_klass, Rarray); 935 __ load_klass(Rvalue_klass, R17_tos); 936 937 // Do fast instanceof cache test. 938 __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass); 939 940 // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure. 941 __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok); 942 943 // Fell through: subtype check failed => throw an exception. 944 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry); 945 __ mtctr(R11_scratch1); 946 __ bctr(); 947 948 __ bind(Lis_null); 949 do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */, 950 Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */); 951 __ profile_null_seen(Rscratch, Rscratch2); 952 __ b(Ldone); 953 954 // Store is OK. 955 __ bind(Lstore_ok); 956 do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */, 957 Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */); 958 959 __ bind(Ldone); 960 // Adjust sp (pops array, index and value). 961 __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize); 962 } 963 964 void TemplateTable::bastore() { 965 transition(itos, vtos); 966 967 const Register Rindex = R11_scratch1, 968 Rarray = R12_scratch2, 969 Rscratch = R3_ARG1; 970 __ pop_i(Rindex); 971 // tos: val 972 // Rarray: array ptr (popped by index_check) 973 __ index_check(Rarray, Rindex, 0, Rscratch, Rarray); 974 __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray); 975 } 976 977 void TemplateTable::castore() { 978 transition(itos, vtos); 979 980 const Register Rindex = R11_scratch1, 981 Rarray = R12_scratch2, 982 Rscratch = R3_ARG1; 983 __ pop_i(Rindex); 984 // tos: val 985 // Rarray: array ptr (popped by index_check) 986 __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray); 987 __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray); 988 } 989 990 void TemplateTable::sastore() { 991 castore(); 992 } 993 994 void TemplateTable::istore(int n) { 995 transition(itos, vtos); 996 __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals); 997 } 998 999 void TemplateTable::lstore(int n) { 1000 transition(ltos, vtos); 1001 __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals); 1002 } 1003 1004 void TemplateTable::fstore(int n) { 1005 transition(ftos, vtos); 1006 __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals); 1007 } 1008 1009 void TemplateTable::dstore(int n) { 1010 transition(dtos, vtos); 1011 __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals); 1012 } 1013 1014 void TemplateTable::astore(int n) { 1015 transition(vtos, vtos); 1016 1017 __ pop_ptr(); 1018 __ verify_oop_or_return_address(R17_tos, R11_scratch1); 1019 __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals); 1020 } 1021 1022 void TemplateTable::pop() { 1023 transition(vtos, vtos); 1024 1025 __ addi(R15_esp, R15_esp, Interpreter::stackElementSize); 1026 } 1027 1028 void TemplateTable::pop2() { 1029 transition(vtos, vtos); 1030 1031 __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2); 1032 } 1033 1034 void TemplateTable::dup() { 1035 transition(vtos, vtos); 1036 1037 __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp); 1038 __ push_ptr(R11_scratch1); 1039 } 1040 1041 void TemplateTable::dup_x1() { 1042 transition(vtos, vtos); 1043 1044 Register Ra = R11_scratch1, 1045 Rb = R12_scratch2; 1046 // stack: ..., a, b 1047 __ ld(Rb, Interpreter::stackElementSize, R15_esp); 1048 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp); 1049 __ std(Rb, Interpreter::stackElementSize * 2, R15_esp); 1050 __ std(Ra, Interpreter::stackElementSize, R15_esp); 1051 __ push_ptr(Rb); 1052 // stack: ..., b, a, b 1053 } 1054 1055 void TemplateTable::dup_x2() { 1056 transition(vtos, vtos); 1057 1058 Register Ra = R11_scratch1, 1059 Rb = R12_scratch2, 1060 Rc = R3_ARG1; 1061 1062 // stack: ..., a, b, c 1063 __ ld(Rc, Interpreter::stackElementSize, R15_esp); // load c 1064 __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp); // load a 1065 __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a 1066 __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp); // load b 1067 // stack: ..., c, b, c 1068 __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b 1069 // stack: ..., c, a, c 1070 __ std(Rb, Interpreter::stackElementSize, R15_esp); // store b in c 1071 __ push_ptr(Rc); // push c 1072 // stack: ..., c, a, b, c 1073 } 1074 1075 void TemplateTable::dup2() { 1076 transition(vtos, vtos); 1077 1078 Register Ra = R11_scratch1, 1079 Rb = R12_scratch2; 1080 // stack: ..., a, b 1081 __ ld(Rb, Interpreter::stackElementSize, R15_esp); 1082 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp); 1083 __ push_2ptrs(Ra, Rb); 1084 // stack: ..., a, b, a, b 1085 } 1086 1087 void TemplateTable::dup2_x1() { 1088 transition(vtos, vtos); 1089 1090 Register Ra = R11_scratch1, 1091 Rb = R12_scratch2, 1092 Rc = R3_ARG1; 1093 // stack: ..., a, b, c 1094 __ ld(Rc, Interpreter::stackElementSize, R15_esp); 1095 __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp); 1096 __ std(Rc, Interpreter::stackElementSize * 2, R15_esp); 1097 __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp); 1098 __ std(Ra, Interpreter::stackElementSize, R15_esp); 1099 __ std(Rb, Interpreter::stackElementSize * 3, R15_esp); 1100 // stack: ..., b, c, a 1101 __ push_2ptrs(Rb, Rc); 1102 // stack: ..., b, c, a, b, c 1103 } 1104 1105 void TemplateTable::dup2_x2() { 1106 transition(vtos, vtos); 1107 1108 Register Ra = R11_scratch1, 1109 Rb = R12_scratch2, 1110 Rc = R3_ARG1, 1111 Rd = R4_ARG2; 1112 // stack: ..., a, b, c, d 1113 __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp); 1114 __ ld(Rd, Interpreter::stackElementSize, R15_esp); 1115 __ std(Rb, Interpreter::stackElementSize, R15_esp); // store b in d 1116 __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b 1117 __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp); 1118 __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp); 1119 __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c 1120 __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a 1121 // stack: ..., c, d, a, b 1122 __ push_2ptrs(Rc, Rd); 1123 // stack: ..., c, d, a, b, c, d 1124 } 1125 1126 void TemplateTable::swap() { 1127 transition(vtos, vtos); 1128 // stack: ..., a, b 1129 1130 Register Ra = R11_scratch1, 1131 Rb = R12_scratch2; 1132 // stack: ..., a, b 1133 __ ld(Rb, Interpreter::stackElementSize, R15_esp); 1134 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp); 1135 __ std(Rb, Interpreter::stackElementSize * 2, R15_esp); 1136 __ std(Ra, Interpreter::stackElementSize, R15_esp); 1137 // stack: ..., b, a 1138 } 1139 1140 void TemplateTable::iop2(Operation op) { 1141 transition(itos, itos); 1142 1143 Register Rscratch = R11_scratch1; 1144 1145 __ pop_i(Rscratch); 1146 // tos = number of bits to shift 1147 // Rscratch = value to shift 1148 switch (op) { 1149 case add: __ add(R17_tos, Rscratch, R17_tos); break; 1150 case sub: __ sub(R17_tos, Rscratch, R17_tos); break; 1151 case mul: __ mullw(R17_tos, Rscratch, R17_tos); break; 1152 case _and: __ andr(R17_tos, Rscratch, R17_tos); break; 1153 case _or: __ orr(R17_tos, Rscratch, R17_tos); break; 1154 case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break; 1155 case shl: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break; 1156 case shr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break; 1157 case ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break; 1158 default: ShouldNotReachHere(); 1159 } 1160 } 1161 1162 void TemplateTable::lop2(Operation op) { 1163 transition(ltos, ltos); 1164 1165 Register Rscratch = R11_scratch1; 1166 __ pop_l(Rscratch); 1167 switch (op) { 1168 case add: __ add(R17_tos, Rscratch, R17_tos); break; 1169 case sub: __ sub(R17_tos, Rscratch, R17_tos); break; 1170 case _and: __ andr(R17_tos, Rscratch, R17_tos); break; 1171 case _or: __ orr(R17_tos, Rscratch, R17_tos); break; 1172 case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break; 1173 default: ShouldNotReachHere(); 1174 } 1175 } 1176 1177 void TemplateTable::idiv() { 1178 transition(itos, itos); 1179 1180 Label Lnormal, Lexception, Ldone; 1181 Register Rdividend = R11_scratch1; // Used by irem. 1182 1183 __ addi(R0, R17_tos, 1); 1184 __ cmplwi(CCR0, R0, 2); 1185 __ bgt(CCR0, Lnormal); // divisor <-1 or >1 1186 1187 __ cmpwi(CCR1, R17_tos, 0); 1188 __ beq(CCR1, Lexception); // divisor == 0 1189 1190 __ pop_i(Rdividend); 1191 __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1 1192 __ b(Ldone); 1193 1194 __ bind(Lexception); 1195 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry); 1196 __ mtctr(R11_scratch1); 1197 __ bctr(); 1198 1199 __ align(32, 12); 1200 __ bind(Lnormal); 1201 __ pop_i(Rdividend); 1202 __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1. 1203 __ bind(Ldone); 1204 } 1205 1206 void TemplateTable::irem() { 1207 transition(itos, itos); 1208 1209 __ mr(R12_scratch2, R17_tos); 1210 idiv(); 1211 __ mullw(R17_tos, R17_tos, R12_scratch2); 1212 __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv. 1213 } 1214 1215 void TemplateTable::lmul() { 1216 transition(ltos, ltos); 1217 1218 __ pop_l(R11_scratch1); 1219 __ mulld(R17_tos, R11_scratch1, R17_tos); 1220 } 1221 1222 void TemplateTable::ldiv() { 1223 transition(ltos, ltos); 1224 1225 Label Lnormal, Lexception, Ldone; 1226 Register Rdividend = R11_scratch1; // Used by lrem. 1227 1228 __ addi(R0, R17_tos, 1); 1229 __ cmpldi(CCR0, R0, 2); 1230 __ bgt(CCR0, Lnormal); // divisor <-1 or >1 1231 1232 __ cmpdi(CCR1, R17_tos, 0); 1233 __ beq(CCR1, Lexception); // divisor == 0 1234 1235 __ pop_l(Rdividend); 1236 __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1 1237 __ b(Ldone); 1238 1239 __ bind(Lexception); 1240 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry); 1241 __ mtctr(R11_scratch1); 1242 __ bctr(); 1243 1244 __ align(32, 12); 1245 __ bind(Lnormal); 1246 __ pop_l(Rdividend); 1247 __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1. 1248 __ bind(Ldone); 1249 } 1250 1251 void TemplateTable::lrem() { 1252 transition(ltos, ltos); 1253 1254 __ mr(R12_scratch2, R17_tos); 1255 ldiv(); 1256 __ mulld(R17_tos, R17_tos, R12_scratch2); 1257 __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv. 1258 } 1259 1260 void TemplateTable::lshl() { 1261 transition(itos, ltos); 1262 1263 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits. 1264 __ pop_l(R11_scratch1); 1265 __ sld(R17_tos, R11_scratch1, R17_tos); 1266 } 1267 1268 void TemplateTable::lshr() { 1269 transition(itos, ltos); 1270 1271 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits. 1272 __ pop_l(R11_scratch1); 1273 __ srad(R17_tos, R11_scratch1, R17_tos); 1274 } 1275 1276 void TemplateTable::lushr() { 1277 transition(itos, ltos); 1278 1279 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits. 1280 __ pop_l(R11_scratch1); 1281 __ srd(R17_tos, R11_scratch1, R17_tos); 1282 } 1283 1284 void TemplateTable::fop2(Operation op) { 1285 transition(ftos, ftos); 1286 1287 switch (op) { 1288 case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break; 1289 case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break; 1290 case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break; 1291 case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break; 1292 case rem: 1293 __ pop_f(F1_ARG1); 1294 __ fmr(F2_ARG2, F15_ftos); 1295 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem)); 1296 __ fmr(F15_ftos, F1_RET); 1297 break; 1298 1299 default: ShouldNotReachHere(); 1300 } 1301 } 1302 1303 void TemplateTable::dop2(Operation op) { 1304 transition(dtos, dtos); 1305 1306 switch (op) { 1307 case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break; 1308 case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break; 1309 case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break; 1310 case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break; 1311 case rem: 1312 __ pop_d(F1_ARG1); 1313 __ fmr(F2_ARG2, F15_ftos); 1314 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem)); 1315 __ fmr(F15_ftos, F1_RET); 1316 break; 1317 1318 default: ShouldNotReachHere(); 1319 } 1320 } 1321 1322 // Negate the value in the TOS cache. 1323 void TemplateTable::ineg() { 1324 transition(itos, itos); 1325 1326 __ neg(R17_tos, R17_tos); 1327 } 1328 1329 // Negate the value in the TOS cache. 1330 void TemplateTable::lneg() { 1331 transition(ltos, ltos); 1332 1333 __ neg(R17_tos, R17_tos); 1334 } 1335 1336 void TemplateTable::fneg() { 1337 transition(ftos, ftos); 1338 1339 __ fneg(F15_ftos, F15_ftos); 1340 } 1341 1342 void TemplateTable::dneg() { 1343 transition(dtos, dtos); 1344 1345 __ fneg(F15_ftos, F15_ftos); 1346 } 1347 1348 // Increments a local variable in place. 1349 void TemplateTable::iinc() { 1350 transition(vtos, vtos); 1351 1352 const Register Rindex = R11_scratch1, 1353 Rincrement = R0, 1354 Rvalue = R12_scratch2; 1355 1356 locals_index(Rindex); // Load locals index from bytecode stream. 1357 __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream. 1358 __ extsb(Rincrement, Rincrement); 1359 1360 __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex. 1361 1362 __ add(Rvalue, Rincrement, Rvalue); 1363 __ stw(Rvalue, 0, Rindex); 1364 } 1365 1366 void TemplateTable::wide_iinc() { 1367 transition(vtos, vtos); 1368 1369 Register Rindex = R11_scratch1, 1370 Rlocals_addr = Rindex, 1371 Rincr = R12_scratch2; 1372 locals_index_wide(Rindex); 1373 __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed); 1374 __ load_local_int(R17_tos, Rlocals_addr, Rindex); 1375 __ add(R17_tos, Rincr, R17_tos); 1376 __ stw(R17_tos, 0, Rlocals_addr); 1377 } 1378 1379 void TemplateTable::convert() { 1380 // %%%%% Factor this first part accross platforms 1381 #ifdef ASSERT 1382 TosState tos_in = ilgl; 1383 TosState tos_out = ilgl; 1384 switch (bytecode()) { 1385 case Bytecodes::_i2l: // fall through 1386 case Bytecodes::_i2f: // fall through 1387 case Bytecodes::_i2d: // fall through 1388 case Bytecodes::_i2b: // fall through 1389 case Bytecodes::_i2c: // fall through 1390 case Bytecodes::_i2s: tos_in = itos; break; 1391 case Bytecodes::_l2i: // fall through 1392 case Bytecodes::_l2f: // fall through 1393 case Bytecodes::_l2d: tos_in = ltos; break; 1394 case Bytecodes::_f2i: // fall through 1395 case Bytecodes::_f2l: // fall through 1396 case Bytecodes::_f2d: tos_in = ftos; break; 1397 case Bytecodes::_d2i: // fall through 1398 case Bytecodes::_d2l: // fall through 1399 case Bytecodes::_d2f: tos_in = dtos; break; 1400 default : ShouldNotReachHere(); 1401 } 1402 switch (bytecode()) { 1403 case Bytecodes::_l2i: // fall through 1404 case Bytecodes::_f2i: // fall through 1405 case Bytecodes::_d2i: // fall through 1406 case Bytecodes::_i2b: // fall through 1407 case Bytecodes::_i2c: // fall through 1408 case Bytecodes::_i2s: tos_out = itos; break; 1409 case Bytecodes::_i2l: // fall through 1410 case Bytecodes::_f2l: // fall through 1411 case Bytecodes::_d2l: tos_out = ltos; break; 1412 case Bytecodes::_i2f: // fall through 1413 case Bytecodes::_l2f: // fall through 1414 case Bytecodes::_d2f: tos_out = ftos; break; 1415 case Bytecodes::_i2d: // fall through 1416 case Bytecodes::_l2d: // fall through 1417 case Bytecodes::_f2d: tos_out = dtos; break; 1418 default : ShouldNotReachHere(); 1419 } 1420 transition(tos_in, tos_out); 1421 #endif 1422 1423 // Conversion 1424 Label done; 1425 switch (bytecode()) { 1426 case Bytecodes::_i2l: 1427 __ extsw(R17_tos, R17_tos); 1428 break; 1429 1430 case Bytecodes::_l2i: 1431 // Nothing to do, we'll continue to work with the lower bits. 1432 break; 1433 1434 case Bytecodes::_i2b: 1435 __ extsb(R17_tos, R17_tos); 1436 break; 1437 1438 case Bytecodes::_i2c: 1439 __ rldicl(R17_tos, R17_tos, 0, 64-2*8); 1440 break; 1441 1442 case Bytecodes::_i2s: 1443 __ extsh(R17_tos, R17_tos); 1444 break; 1445 1446 case Bytecodes::_i2d: 1447 __ extsw(R17_tos, R17_tos); 1448 case Bytecodes::_l2d: 1449 __ push_l_pop_d(); 1450 __ fcfid(F15_ftos, F15_ftos); 1451 break; 1452 1453 case Bytecodes::_i2f: 1454 __ extsw(R17_tos, R17_tos); 1455 __ push_l_pop_d(); 1456 if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only 1457 // Comment: alternatively, load with sign extend could be done by lfiwax. 1458 __ fcfids(F15_ftos, F15_ftos); 1459 } else { 1460 __ fcfid(F15_ftos, F15_ftos); 1461 __ frsp(F15_ftos, F15_ftos); 1462 } 1463 break; 1464 1465 case Bytecodes::_l2f: 1466 if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only 1467 __ push_l_pop_d(); 1468 __ fcfids(F15_ftos, F15_ftos); 1469 } else { 1470 // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp. 1471 __ mr(R3_ARG1, R17_tos); 1472 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f)); 1473 __ fmr(F15_ftos, F1_RET); 1474 } 1475 break; 1476 1477 case Bytecodes::_f2d: 1478 // empty 1479 break; 1480 1481 case Bytecodes::_d2f: 1482 __ frsp(F15_ftos, F15_ftos); 1483 break; 1484 1485 case Bytecodes::_d2i: 1486 case Bytecodes::_f2i: 1487 __ fcmpu(CCR0, F15_ftos, F15_ftos); 1488 __ li(R17_tos, 0); // 0 in case of NAN 1489 __ bso(CCR0, done); 1490 __ fctiwz(F15_ftos, F15_ftos); 1491 __ push_d_pop_l(); 1492 break; 1493 1494 case Bytecodes::_d2l: 1495 case Bytecodes::_f2l: 1496 __ fcmpu(CCR0, F15_ftos, F15_ftos); 1497 __ li(R17_tos, 0); // 0 in case of NAN 1498 __ bso(CCR0, done); 1499 __ fctidz(F15_ftos, F15_ftos); 1500 __ push_d_pop_l(); 1501 break; 1502 1503 default: ShouldNotReachHere(); 1504 } 1505 __ bind(done); 1506 } 1507 1508 // Long compare 1509 void TemplateTable::lcmp() { 1510 transition(ltos, itos); 1511 1512 const Register Rscratch = R11_scratch1; 1513 __ pop_l(Rscratch); // first operand, deeper in stack 1514 1515 __ cmpd(CCR0, Rscratch, R17_tos); // compare 1516 __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01 1517 __ srwi(Rscratch, R17_tos, 30); 1518 __ srawi(R17_tos, R17_tos, 31); 1519 __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1 1520 } 1521 1522 // fcmpl/fcmpg and dcmpl/dcmpg bytecodes 1523 // unordered_result == -1 => fcmpl or dcmpl 1524 // unordered_result == 1 => fcmpg or dcmpg 1525 void TemplateTable::float_cmp(bool is_float, int unordered_result) { 1526 const FloatRegister Rfirst = F0_SCRATCH, 1527 Rsecond = F15_ftos; 1528 const Register Rscratch = R11_scratch1; 1529 1530 if (is_float) { 1531 __ pop_f(Rfirst); 1532 } else { 1533 __ pop_d(Rfirst); 1534 } 1535 1536 Label Lunordered, Ldone; 1537 __ fcmpu(CCR0, Rfirst, Rsecond); // compare 1538 if (unordered_result) { 1539 __ bso(CCR0, Lunordered); 1540 } 1541 __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01 1542 __ srwi(Rscratch, R17_tos, 30); 1543 __ srawi(R17_tos, R17_tos, 31); 1544 __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1 1545 if (unordered_result) { 1546 __ b(Ldone); 1547 __ bind(Lunordered); 1548 __ load_const_optimized(R17_tos, unordered_result); 1549 } 1550 __ bind(Ldone); 1551 } 1552 1553 // Branch_conditional which takes TemplateTable::Condition. 1554 void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) { 1555 bool positive = false; 1556 Assembler::Condition cond = Assembler::equal; 1557 switch (cc) { 1558 case TemplateTable::equal: positive = true ; cond = Assembler::equal ; break; 1559 case TemplateTable::not_equal: positive = false; cond = Assembler::equal ; break; 1560 case TemplateTable::less: positive = true ; cond = Assembler::less ; break; 1561 case TemplateTable::less_equal: positive = false; cond = Assembler::greater; break; 1562 case TemplateTable::greater: positive = true ; cond = Assembler::greater; break; 1563 case TemplateTable::greater_equal: positive = false; cond = Assembler::less ; break; 1564 default: ShouldNotReachHere(); 1565 } 1566 int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0; 1567 int bi = Assembler::bi0(crx, cond); 1568 __ bc(bo, bi, L); 1569 } 1570 1571 void TemplateTable::branch(bool is_jsr, bool is_wide) { 1572 1573 // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also. 1574 __ verify_thread(); 1575 1576 const Register Rscratch1 = R11_scratch1, 1577 Rscratch2 = R12_scratch2, 1578 Rscratch3 = R3_ARG1, 1579 R4_counters = R4_ARG2, 1580 bumped_count = R31, 1581 Rdisp = R22_tmp2; 1582 1583 __ profile_taken_branch(Rscratch1, bumped_count); 1584 1585 // Get (wide) offset. 1586 if (is_wide) { 1587 __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed); 1588 } else { 1589 __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed); 1590 } 1591 1592 // -------------------------------------------------------------------------- 1593 // Handle all the JSR stuff here, then exit. 1594 // It's much shorter and cleaner than intermingling with the 1595 // non-JSR normal-branch stuff occurring below. 1596 if (is_jsr) { 1597 // Compute return address as bci in Otos_i. 1598 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method); 1599 __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3)); 1600 __ subf(R17_tos, Rscratch1, Rscratch2); 1601 1602 // Bump bcp to target of JSR. 1603 __ add(R14_bcp, Rdisp, R14_bcp); 1604 // Push returnAddress for "ret" on stack. 1605 __ push_ptr(R17_tos); 1606 // And away we go! 1607 __ dispatch_next(vtos); 1608 return; 1609 } 1610 1611 // -------------------------------------------------------------------------- 1612 // Normal (non-jsr) branch handling 1613 1614 const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter; 1615 if (increment_invocation_counter_for_backward_branches) { 1616 //__ unimplemented("branch invocation counter"); 1617 1618 Label Lforward; 1619 __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr. 1620 1621 // Check branch direction. 1622 __ cmpdi(CCR0, Rdisp, 0); 1623 __ bgt(CCR0, Lforward); 1624 1625 __ get_method_counters(R19_method, R4_counters, Lforward); 1626 1627 if (TieredCompilation) { 1628 Label Lno_mdo, Loverflow; 1629 const int increment = InvocationCounter::count_increment; 1630 const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift; 1631 if (ProfileInterpreter) { 1632 Register Rmdo = Rscratch1; 1633 1634 // If no method data exists, go to profile_continue. 1635 __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method); 1636 __ cmpdi(CCR0, Rmdo, 0); 1637 __ beq(CCR0, Lno_mdo); 1638 1639 // Increment backedge counter in the MDO. 1640 const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset()); 1641 __ lwz(Rscratch2, mdo_bc_offs, Rmdo); 1642 __ load_const_optimized(Rscratch3, mask, R0); 1643 __ addi(Rscratch2, Rscratch2, increment); 1644 __ stw(Rscratch2, mdo_bc_offs, Rmdo); 1645 __ and_(Rscratch3, Rscratch2, Rscratch3); 1646 __ bne(CCR0, Lforward); 1647 __ b(Loverflow); 1648 } 1649 1650 // If there's no MDO, increment counter in method. 1651 const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset()); 1652 __ bind(Lno_mdo); 1653 __ lwz(Rscratch2, mo_bc_offs, R4_counters); 1654 __ load_const_optimized(Rscratch3, mask, R0); 1655 __ addi(Rscratch2, Rscratch2, increment); 1656 __ stw(Rscratch2, mo_bc_offs, R19_method); 1657 __ and_(Rscratch3, Rscratch2, Rscratch3); 1658 __ bne(CCR0, Lforward); 1659 1660 __ bind(Loverflow); 1661 1662 // Notify point for loop, pass branch bytecode. 1663 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true); 1664 1665 // Was an OSR adapter generated? 1666 // O0 = osr nmethod 1667 __ cmpdi(CCR0, R3_RET, 0); 1668 __ beq(CCR0, Lforward); 1669 1670 // Has the nmethod been invalidated already? 1671 __ lbz(R0, nmethod::state_offset(), R3_RET); 1672 __ cmpwi(CCR0, R0, nmethod::in_use); 1673 __ bne(CCR0, Lforward); 1674 1675 // Migrate the interpreter frame off of the stack. 1676 // We can use all registers because we will not return to interpreter from this point. 1677 1678 // Save nmethod. 1679 const Register osr_nmethod = R31; 1680 __ mr(osr_nmethod, R3_RET); 1681 __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1); 1682 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread); 1683 __ reset_last_Java_frame(); 1684 // OSR buffer is in ARG1. 1685 1686 // Remove the interpreter frame. 1687 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2); 1688 1689 // Jump to the osr code. 1690 __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod); 1691 __ mtlr(R0); 1692 __ mtctr(R11_scratch1); 1693 __ bctr(); 1694 1695 } else { 1696 1697 const Register invoke_ctr = Rscratch1; 1698 // Update Backedge branch separately from invocations. 1699 __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3); 1700 1701 if (ProfileInterpreter) { 1702 __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward); 1703 if (UseOnStackReplacement) { 1704 __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2); 1705 } 1706 } else { 1707 if (UseOnStackReplacement) { 1708 __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2); 1709 } 1710 } 1711 } 1712 1713 __ bind(Lforward); 1714 1715 } else { 1716 // Bump bytecode pointer by displacement (take the branch). 1717 __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr. 1718 } 1719 // Continue with bytecode @ target. 1720 // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above, 1721 // %%%%% and changing dispatch_next to dispatch_only. 1722 __ dispatch_next(vtos); 1723 } 1724 1725 // Helper function for if_cmp* methods below. 1726 // Factored out common compare and branch code. 1727 void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) { 1728 Label Lnot_taken; 1729 // Note: The condition code we get is the condition under which we 1730 // *fall through*! So we have to inverse the CC here. 1731 1732 if (is_jint) { 1733 if (cmp0) { 1734 __ cmpwi(CCR0, Rfirst, 0); 1735 } else { 1736 __ cmpw(CCR0, Rfirst, Rsecond); 1737 } 1738 } else { 1739 if (cmp0) { 1740 __ cmpdi(CCR0, Rfirst, 0); 1741 } else { 1742 __ cmpd(CCR0, Rfirst, Rsecond); 1743 } 1744 } 1745 branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true); 1746 1747 // Conition is false => Jump! 1748 branch(false, false); 1749 1750 // Condition is not true => Continue. 1751 __ align(32, 12); 1752 __ bind(Lnot_taken); 1753 __ profile_not_taken_branch(Rscratch1, Rscratch2); 1754 } 1755 1756 // Compare integer values with zero and fall through if CC holds, branch away otherwise. 1757 void TemplateTable::if_0cmp(Condition cc) { 1758 transition(itos, vtos); 1759 1760 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true); 1761 } 1762 1763 // Compare integer values and fall through if CC holds, branch away otherwise. 1764 // 1765 // Interface: 1766 // - Rfirst: First operand (older stack value) 1767 // - tos: Second operand (younger stack value) 1768 void TemplateTable::if_icmp(Condition cc) { 1769 transition(itos, vtos); 1770 1771 const Register Rfirst = R0, 1772 Rsecond = R17_tos; 1773 1774 __ pop_i(Rfirst); 1775 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false); 1776 } 1777 1778 void TemplateTable::if_nullcmp(Condition cc) { 1779 transition(atos, vtos); 1780 1781 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true); 1782 } 1783 1784 void TemplateTable::if_acmp(Condition cc) { 1785 transition(atos, vtos); 1786 1787 const Register Rfirst = R0, 1788 Rsecond = R17_tos; 1789 1790 __ pop_ptr(Rfirst); 1791 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false); 1792 } 1793 1794 void TemplateTable::ret() { 1795 locals_index(R11_scratch1); 1796 __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1); 1797 1798 __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2); 1799 1800 __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method); 1801 __ add(R11_scratch1, R17_tos, R11_scratch1); 1802 __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset())); 1803 __ dispatch_next(vtos); 1804 } 1805 1806 void TemplateTable::wide_ret() { 1807 transition(vtos, vtos); 1808 1809 const Register Rindex = R3_ARG1, 1810 Rscratch1 = R11_scratch1, 1811 Rscratch2 = R12_scratch2; 1812 1813 locals_index_wide(Rindex); 1814 __ load_local_ptr(R17_tos, R17_tos, Rindex); 1815 __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2); 1816 // Tos now contains the bci, compute the bcp from that. 1817 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method); 1818 __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset())); 1819 __ add(R14_bcp, Rscratch1, Rscratch2); 1820 __ dispatch_next(vtos); 1821 } 1822 1823 void TemplateTable::tableswitch() { 1824 transition(itos, vtos); 1825 1826 Label Ldispatch, Ldefault_case; 1827 Register Rlow_byte = R3_ARG1, 1828 Rindex = Rlow_byte, 1829 Rhigh_byte = R4_ARG2, 1830 Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset 1831 Rscratch1 = R11_scratch1, 1832 Rscratch2 = R12_scratch2, 1833 Roffset = R6_ARG4; 1834 1835 // Align bcp. 1836 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1837 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1838 1839 // Load lo & hi. 1840 __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1841 __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned); 1842 1843 // Check for default case (=index outside [low,high]). 1844 __ cmpw(CCR0, R17_tos, Rlow_byte); 1845 __ cmpw(CCR1, R17_tos, Rhigh_byte); 1846 __ blt(CCR0, Ldefault_case); 1847 __ bgt(CCR1, Ldefault_case); 1848 1849 // Lookup dispatch offset. 1850 __ sub(Rindex, R17_tos, Rlow_byte); 1851 __ extsw(Rindex, Rindex); 1852 __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2); 1853 __ sldi(Rindex, Rindex, LogBytesPerInt); 1854 __ addi(Rindex, Rindex, 3 * BytesPerInt); 1855 #if defined(VM_LITTLE_ENDIAN) 1856 __ lwbrx(Roffset, Rdef_offset_addr, Rindex); 1857 __ extsw(Roffset, Roffset); 1858 #else 1859 __ lwax(Roffset, Rdef_offset_addr, Rindex); 1860 #endif 1861 __ b(Ldispatch); 1862 1863 __ bind(Ldefault_case); 1864 __ profile_switch_default(Rhigh_byte, Rscratch1); 1865 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1866 1867 __ bind(Ldispatch); 1868 1869 __ add(R14_bcp, Roffset, R14_bcp); 1870 __ dispatch_next(vtos); 1871 } 1872 1873 void TemplateTable::lookupswitch() { 1874 transition(itos, itos); 1875 __ stop("lookupswitch bytecode should have been rewritten"); 1876 } 1877 1878 // Table switch using linear search through cases. 1879 // Bytecode stream format: 1880 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1881 // Note: Everything is big-endian format here. 1882 void TemplateTable::fast_linearswitch() { 1883 transition(itos, vtos); 1884 1885 Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case; 1886 Register Rcount = R3_ARG1, 1887 Rcurrent_pair = R4_ARG2, 1888 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset. 1889 Roffset = R31, // Might need to survive C call. 1890 Rvalue = R12_scratch2, 1891 Rscratch = R11_scratch1, 1892 Rcmp_value = R17_tos; 1893 1894 // Align bcp. 1895 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1896 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1897 1898 // Setup loop counter and limit. 1899 __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1900 __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair. 1901 1902 __ mtctr(Rcount); 1903 __ cmpwi(CCR0, Rcount, 0); 1904 __ bne(CCR0, Lloop_entry); 1905 1906 // Default case 1907 __ bind(Ldefault_case); 1908 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1909 if (ProfileInterpreter) { 1910 __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */); 1911 } 1912 __ b(Lcontinue_execution); 1913 1914 // Next iteration 1915 __ bind(Lsearch_loop); 1916 __ bdz(Ldefault_case); 1917 __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt); 1918 __ bind(Lloop_entry); 1919 __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned); 1920 __ cmpw(CCR0, Rvalue, Rcmp_value); 1921 __ bne(CCR0, Lsearch_loop); 1922 1923 // Found, load offset. 1924 __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed); 1925 // Calculate case index and profile 1926 __ mfctr(Rcurrent_pair); 1927 if (ProfileInterpreter) { 1928 __ sub(Rcurrent_pair, Rcount, Rcurrent_pair); 1929 __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch); 1930 } 1931 1932 __ bind(Lcontinue_execution); 1933 __ add(R14_bcp, Roffset, R14_bcp); 1934 __ dispatch_next(vtos); 1935 } 1936 1937 // Table switch using binary search (value/offset pairs are ordered). 1938 // Bytecode stream format: 1939 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1940 // Note: Everything is big-endian format here. So on little endian machines, we have to revers offset and count and cmp value. 1941 void TemplateTable::fast_binaryswitch() { 1942 1943 transition(itos, vtos); 1944 // Implementation using the following core algorithm: (copied from Intel) 1945 // 1946 // int binary_search(int key, LookupswitchPair* array, int n) { 1947 // // Binary search according to "Methodik des Programmierens" by 1948 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. 1949 // int i = 0; 1950 // int j = n; 1951 // while (i+1 < j) { 1952 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) 1953 // // with Q: for all i: 0 <= i < n: key < a[i] 1954 // // where a stands for the array and assuming that the (inexisting) 1955 // // element a[n] is infinitely big. 1956 // int h = (i + j) >> 1; 1957 // // i < h < j 1958 // if (key < array[h].fast_match()) { 1959 // j = h; 1960 // } else { 1961 // i = h; 1962 // } 1963 // } 1964 // // R: a[i] <= key < a[i+1] or Q 1965 // // (i.e., if key is within array, i is the correct index) 1966 // return i; 1967 // } 1968 1969 // register allocation 1970 const Register Rkey = R17_tos; // already set (tosca) 1971 const Register Rarray = R3_ARG1; 1972 const Register Ri = R4_ARG2; 1973 const Register Rj = R5_ARG3; 1974 const Register Rh = R6_ARG4; 1975 const Register Rscratch = R11_scratch1; 1976 1977 const int log_entry_size = 3; 1978 const int entry_size = 1 << log_entry_size; 1979 1980 Label found; 1981 1982 // Find Array start, 1983 __ addi(Rarray, R14_bcp, 3 * BytesPerInt); 1984 __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt)); 1985 1986 // initialize i & j 1987 __ li(Ri,0); 1988 __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned); 1989 1990 // and start. 1991 Label entry; 1992 __ b(entry); 1993 1994 // binary search loop 1995 { Label loop; 1996 __ bind(loop); 1997 // int h = (i + j) >> 1; 1998 __ srdi(Rh, Rh, 1); 1999 // if (key < array[h].fast_match()) { 2000 // j = h; 2001 // } else { 2002 // i = h; 2003 // } 2004 __ sldi(Rscratch, Rh, log_entry_size); 2005 #if defined(VM_LITTLE_ENDIAN) 2006 __ lwbrx(Rscratch, Rscratch, Rarray); 2007 #else 2008 __ lwzx(Rscratch, Rscratch, Rarray); 2009 #endif 2010 2011 // if (key < current value) 2012 // Rh = Rj 2013 // else 2014 // Rh = Ri 2015 Label Lgreater; 2016 __ cmpw(CCR0, Rkey, Rscratch); 2017 __ bge(CCR0, Lgreater); 2018 __ mr(Rj, Rh); 2019 __ b(entry); 2020 __ bind(Lgreater); 2021 __ mr(Ri, Rh); 2022 2023 // while (i+1 < j) 2024 __ bind(entry); 2025 __ addi(Rscratch, Ri, 1); 2026 __ cmpw(CCR0, Rscratch, Rj); 2027 __ add(Rh, Ri, Rj); // start h = i + j >> 1; 2028 2029 __ blt(CCR0, loop); 2030 } 2031 2032 // End of binary search, result index is i (must check again!). 2033 Label default_case; 2034 Label continue_execution; 2035 if (ProfileInterpreter) { 2036 __ mr(Rh, Ri); // Save index in i for profiling. 2037 } 2038 // Ri = value offset 2039 __ sldi(Ri, Ri, log_entry_size); 2040 __ add(Ri, Ri, Rarray); 2041 __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned); 2042 2043 Label not_found; 2044 // Ri = offset offset 2045 __ cmpw(CCR0, Rkey, Rscratch); 2046 __ beq(CCR0, not_found); 2047 // entry not found -> j = default offset 2048 __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned); 2049 __ b(default_case); 2050 2051 __ bind(not_found); 2052 // entry found -> j = offset 2053 __ profile_switch_case(Rh, Rj, Rscratch, Rkey); 2054 __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned); 2055 2056 if (ProfileInterpreter) { 2057 __ b(continue_execution); 2058 } 2059 2060 __ bind(default_case); // fall through (if not profiling) 2061 __ profile_switch_default(Ri, Rscratch); 2062 2063 __ bind(continue_execution); 2064 2065 __ extsw(Rj, Rj); 2066 __ add(R14_bcp, Rj, R14_bcp); 2067 __ dispatch_next(vtos); 2068 } 2069 2070 void TemplateTable::_return(TosState state) { 2071 transition(state, state); 2072 assert(_desc->calls_vm(), 2073 "inconsistent calls_vm information"); // call in remove_activation 2074 2075 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { 2076 2077 Register Rscratch = R11_scratch1, 2078 Rklass = R12_scratch2, 2079 Rklass_flags = Rklass; 2080 Label Lskip_register_finalizer; 2081 2082 // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case. 2083 assert(state == vtos, "only valid state"); 2084 __ ld(R17_tos, 0, R18_locals); 2085 2086 // Load klass of this obj. 2087 __ load_klass(Rklass, R17_tos); 2088 __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass); 2089 __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER)); 2090 __ bfalse(CCR0, Lskip_register_finalizer); 2091 2092 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */); 2093 2094 __ align(32, 12); 2095 __ bind(Lskip_register_finalizer); 2096 } 2097 2098 // Move the result value into the correct register and remove memory stack frame. 2099 __ remove_activation(state, /* throw_monitor_exception */ true); 2100 // Restoration of lr done by remove_activation. 2101 switch (state) { 2102 case ltos: 2103 case btos: 2104 case ctos: 2105 case stos: 2106 case atos: 2107 case itos: __ mr(R3_RET, R17_tos); break; 2108 case ftos: 2109 case dtos: __ fmr(F1_RET, F15_ftos); break; 2110 case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need 2111 // to get visible before the reference to the object gets stored anywhere. 2112 __ membar(Assembler::StoreStore); break; 2113 default : ShouldNotReachHere(); 2114 } 2115 __ blr(); 2116 } 2117 2118 // ============================================================================ 2119 // Constant pool cache access 2120 // 2121 // Memory ordering: 2122 // 2123 // Like done in C++ interpreter, we load the fields 2124 // - _indices 2125 // - _f12_oop 2126 // acquired, because these are asked if the cache is already resolved. We don't 2127 // want to float loads above this check. 2128 // See also comments in ConstantPoolCacheEntry::bytecode_1(), 2129 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1(); 2130 2131 // Call into the VM if call site is not yet resolved 2132 // 2133 // Input regs: 2134 // - None, all passed regs are outputs. 2135 // 2136 // Returns: 2137 // - Rcache: The const pool cache entry that contains the resolved result. 2138 // - Rresult: Either noreg or output for f1/f2. 2139 // 2140 // Kills: 2141 // - Rscratch 2142 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) { 2143 2144 __ get_cache_and_index_at_bcp(Rcache, 1, index_size); 2145 Label Lresolved, Ldone; 2146 2147 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); 2148 // We are resolved if the indices offset contains the current bytecode. 2149 #if defined(VM_LITTLE_ENDIAN) 2150 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache); 2151 #else 2152 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache); 2153 #endif 2154 // Acquire by cmp-br-isync (see below). 2155 __ cmpdi(CCR0, Rscratch, (int)bytecode()); 2156 __ beq(CCR0, Lresolved); 2157 2158 address entry = NULL; 2159 switch (bytecode()) { 2160 case Bytecodes::_getstatic : // fall through 2161 case Bytecodes::_putstatic : // fall through 2162 case Bytecodes::_getfield : // fall through 2163 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break; 2164 case Bytecodes::_invokevirtual : // fall through 2165 case Bytecodes::_invokespecial : // fall through 2166 case Bytecodes::_invokestatic : // fall through 2167 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; 2168 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break; 2169 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; 2170 default : ShouldNotReachHere(); break; 2171 } 2172 __ li(R4_ARG2, (int)bytecode()); 2173 __ call_VM(noreg, entry, R4_ARG2, true); 2174 2175 // Update registers with resolved info. 2176 __ get_cache_and_index_at_bcp(Rcache, 1, index_size); 2177 __ b(Ldone); 2178 2179 __ bind(Lresolved); 2180 __ isync(); // Order load wrt. succeeding loads. 2181 __ bind(Ldone); 2182 } 2183 2184 // Load the constant pool cache entry at field accesses into registers. 2185 // The Rcache and Rindex registers must be set before call. 2186 // Input: 2187 // - Rcache, Rindex 2188 // Output: 2189 // - Robj, Roffset, Rflags 2190 void TemplateTable::load_field_cp_cache_entry(Register Robj, 2191 Register Rcache, 2192 Register Rindex /* unused on PPC64 */, 2193 Register Roffset, 2194 Register Rflags, 2195 bool is_static = false) { 2196 assert_different_registers(Rcache, Rflags, Roffset); 2197 // assert(Rindex == noreg, "parameter not used on PPC64"); 2198 2199 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2200 __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); 2201 __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache); 2202 if (is_static) { 2203 __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache); 2204 __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj); 2205 // Acquire not needed here. Following access has an address dependency on this value. 2206 } 2207 } 2208 2209 // Load the constant pool cache entry at invokes into registers. 2210 // Resolve if necessary. 2211 2212 // Input Registers: 2213 // - None, bcp is used, though 2214 // 2215 // Return registers: 2216 // - Rmethod (f1 field or f2 if invokevirtual) 2217 // - Ritable_index (f2 field) 2218 // - Rflags (flags field) 2219 // 2220 // Kills: 2221 // - R21 2222 // 2223 void TemplateTable::load_invoke_cp_cache_entry(int byte_no, 2224 Register Rmethod, 2225 Register Ritable_index, 2226 Register Rflags, 2227 bool is_invokevirtual, 2228 bool is_invokevfinal, 2229 bool is_invokedynamic) { 2230 2231 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2232 // Determine constant pool cache field offsets. 2233 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); 2234 const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset())); 2235 const int flags_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()); 2236 // Access constant pool cache fields. 2237 const int index_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()); 2238 2239 Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP. 2240 2241 if (is_invokevfinal) { 2242 assert(Ritable_index == noreg, "register not used"); 2243 // Already resolved. 2244 __ get_cache_and_index_at_bcp(Rcache, 1); 2245 } else { 2246 resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2)); 2247 } 2248 2249 __ ld(Rmethod, method_offset, Rcache); 2250 __ ld(Rflags, flags_offset, Rcache); 2251 2252 if (Ritable_index != noreg) { 2253 __ ld(Ritable_index, index_offset, Rcache); 2254 } 2255 } 2256 2257 // ============================================================================ 2258 // Field access 2259 2260 // Volatile variables demand their effects be made known to all CPU's 2261 // in order. Store buffers on most chips allow reads & writes to 2262 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode 2263 // without some kind of memory barrier (i.e., it's not sufficient that 2264 // the interpreter does not reorder volatile references, the hardware 2265 // also must not reorder them). 2266 // 2267 // According to the new Java Memory Model (JMM): 2268 // (1) All volatiles are serialized wrt to each other. ALSO reads & 2269 // writes act as aquire & release, so: 2270 // (2) A read cannot let unrelated NON-volatile memory refs that 2271 // happen after the read float up to before the read. It's OK for 2272 // non-volatile memory refs that happen before the volatile read to 2273 // float down below it. 2274 // (3) Similar a volatile write cannot let unrelated NON-volatile 2275 // memory refs that happen BEFORE the write float down to after the 2276 // write. It's OK for non-volatile memory refs that happen after the 2277 // volatile write to float up before it. 2278 // 2279 // We only put in barriers around volatile refs (they are expensive), 2280 // not _between_ memory refs (that would require us to track the 2281 // flavor of the previous memory refs). Requirements (2) and (3) 2282 // require some barriers before volatile stores and after volatile 2283 // loads. These nearly cover requirement (1) but miss the 2284 // volatile-store-volatile-load case. This final case is placed after 2285 // volatile-stores although it could just as well go before 2286 // volatile-loads. 2287 2288 // The registers cache and index expected to be set before call. 2289 // Correct values of the cache and index registers are preserved. 2290 // Kills: 2291 // Rcache (if has_tos) 2292 // Rscratch 2293 void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) { 2294 2295 assert_different_registers(Rcache, Rscratch); 2296 2297 if (JvmtiExport::can_post_field_access()) { 2298 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2299 Label Lno_field_access_post; 2300 2301 // Check if post field access in enabled. 2302 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true); 2303 __ lwz(Rscratch, offs, Rscratch); 2304 2305 __ cmpwi(CCR0, Rscratch, 0); 2306 __ beq(CCR0, Lno_field_access_post); 2307 2308 // Post access enabled - do it! 2309 __ addi(Rcache, Rcache, in_bytes(cp_base_offset)); 2310 if (is_static) { 2311 __ li(R17_tos, 0); 2312 } else { 2313 if (has_tos) { 2314 // The fast bytecode versions have obj ptr in register. 2315 // Thus, save object pointer before call_VM() clobbers it 2316 // put object on tos where GC wants it. 2317 __ push_ptr(R17_tos); 2318 } else { 2319 // Load top of stack (do not pop the value off the stack). 2320 __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); 2321 } 2322 __ verify_oop(R17_tos); 2323 } 2324 // tos: object pointer or NULL if static 2325 // cache: cache entry pointer 2326 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache); 2327 if (!is_static && has_tos) { 2328 // Restore object pointer. 2329 __ pop_ptr(R17_tos); 2330 __ verify_oop(R17_tos); 2331 } else { 2332 // Cache is still needed to get class or obj. 2333 __ get_cache_and_index_at_bcp(Rcache, 1); 2334 } 2335 2336 __ align(32, 12); 2337 __ bind(Lno_field_access_post); 2338 } 2339 } 2340 2341 // kills R11_scratch1 2342 void TemplateTable::pop_and_check_object(Register Roop) { 2343 Register Rtmp = R11_scratch1; 2344 2345 assert_different_registers(Rtmp, Roop); 2346 __ pop_ptr(Roop); 2347 // For field access must check obj. 2348 __ null_check_throw(Roop, -1, Rtmp); 2349 __ verify_oop(Roop); 2350 } 2351 2352 // PPC64: implement volatile loads as fence-store-acquire. 2353 void TemplateTable::getfield_or_static(int byte_no, bool is_static) { 2354 transition(vtos, vtos); 2355 2356 Label Lacquire, Lisync; 2357 2358 const Register Rcache = R3_ARG1, 2359 Rclass_or_obj = R22_tmp2, 2360 Roffset = R23_tmp3, 2361 Rflags = R31, 2362 Rbtable = R5_ARG3, 2363 Rbc = R6_ARG4, 2364 Rscratch = R12_scratch2; 2365 2366 static address field_branch_table[number_of_states], 2367 static_branch_table[number_of_states]; 2368 2369 address* branch_table = is_static ? static_branch_table : field_branch_table; 2370 2371 // Get field offset. 2372 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2373 2374 // JVMTI support 2375 jvmti_post_field_access(Rcache, Rscratch, is_static, false); 2376 2377 // Load after possible GC. 2378 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2379 2380 // Load pointer to branch table. 2381 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2382 2383 // Get volatile flag. 2384 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2385 // Note: sync is needed before volatile load on PPC64. 2386 2387 // Check field type. 2388 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2389 2390 #ifdef ASSERT 2391 Label LFlagInvalid; 2392 __ cmpldi(CCR0, Rflags, number_of_states); 2393 __ bge(CCR0, LFlagInvalid); 2394 #endif 2395 2396 // Load from branch table and dispatch (volatile case: one instruction ahead). 2397 __ sldi(Rflags, Rflags, LogBytesPerWord); 2398 __ cmpwi(CCR6, Rscratch, 1); // Volatile? 2399 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2400 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0. 2401 } 2402 __ ldx(Rbtable, Rbtable, Rflags); 2403 2404 // Get the obj from stack. 2405 if (!is_static) { 2406 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2407 } else { 2408 __ verify_oop(Rclass_or_obj); 2409 } 2410 2411 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2412 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2413 } 2414 __ mtctr(Rbtable); 2415 __ bctr(); 2416 2417 #ifdef ASSERT 2418 __ bind(LFlagInvalid); 2419 __ stop("got invalid flag", 0x654); 2420 2421 // __ bind(Lvtos); 2422 address pc_before_fence = __ pc(); 2423 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2424 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2425 assert(branch_table[vtos] == 0, "can't compute twice"); 2426 branch_table[vtos] = __ pc(); // non-volatile_entry point 2427 __ stop("vtos unexpected", 0x655); 2428 #endif 2429 2430 __ align(32, 28, 28); // Align load. 2431 // __ bind(Ldtos); 2432 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2433 assert(branch_table[dtos] == 0, "can't compute twice"); 2434 branch_table[dtos] = __ pc(); // non-volatile_entry point 2435 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 2436 __ push(dtos); 2437 if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch); 2438 { 2439 Label acquire_double; 2440 __ beq(CCR6, acquire_double); // Volatile? 2441 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2442 2443 __ bind(acquire_double); 2444 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2445 __ beq_predict_taken(CCR0, Lisync); 2446 __ b(Lisync); // In case of NAN. 2447 } 2448 2449 __ align(32, 28, 28); // Align load. 2450 // __ bind(Lftos); 2451 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2452 assert(branch_table[ftos] == 0, "can't compute twice"); 2453 branch_table[ftos] = __ pc(); // non-volatile_entry point 2454 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 2455 __ push(ftos); 2456 if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); } 2457 { 2458 Label acquire_float; 2459 __ beq(CCR6, acquire_float); // Volatile? 2460 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2461 2462 __ bind(acquire_float); 2463 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2464 __ beq_predict_taken(CCR0, Lisync); 2465 __ b(Lisync); // In case of NAN. 2466 } 2467 2468 __ align(32, 28, 28); // Align load. 2469 // __ bind(Litos); 2470 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2471 assert(branch_table[itos] == 0, "can't compute twice"); 2472 branch_table[itos] = __ pc(); // non-volatile_entry point 2473 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2474 __ push(itos); 2475 if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch); 2476 __ beq(CCR6, Lacquire); // Volatile? 2477 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2478 2479 __ align(32, 28, 28); // Align load. 2480 // __ bind(Lltos); 2481 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2482 assert(branch_table[ltos] == 0, "can't compute twice"); 2483 branch_table[ltos] = __ pc(); // non-volatile_entry point 2484 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2485 __ push(ltos); 2486 if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch); 2487 __ beq(CCR6, Lacquire); // Volatile? 2488 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2489 2490 __ align(32, 28, 28); // Align load. 2491 // __ bind(Lbtos); 2492 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2493 assert(branch_table[btos] == 0, "can't compute twice"); 2494 branch_table[btos] = __ pc(); // non-volatile_entry point 2495 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2496 __ extsb(R17_tos, R17_tos); 2497 __ push(btos); 2498 if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2499 __ beq(CCR6, Lacquire); // Volatile? 2500 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2501 2502 __ align(32, 28, 28); // Align load. 2503 // __ bind(Lctos); 2504 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2505 assert(branch_table[ctos] == 0, "can't compute twice"); 2506 branch_table[ctos] = __ pc(); // non-volatile_entry point 2507 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 2508 __ push(ctos); 2509 if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch); 2510 __ beq(CCR6, Lacquire); // Volatile? 2511 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2512 2513 __ align(32, 28, 28); // Align load. 2514 // __ bind(Lstos); 2515 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2516 assert(branch_table[stos] == 0, "can't compute twice"); 2517 branch_table[stos] = __ pc(); // non-volatile_entry point 2518 __ lhax(R17_tos, Rclass_or_obj, Roffset); 2519 __ push(stos); 2520 if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch); 2521 __ beq(CCR6, Lacquire); // Volatile? 2522 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2523 2524 __ align(32, 28, 28); // Align load. 2525 // __ bind(Latos); 2526 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2527 assert(branch_table[atos] == 0, "can't compute twice"); 2528 branch_table[atos] = __ pc(); // non-volatile_entry point 2529 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 2530 __ verify_oop(R17_tos); 2531 __ push(atos); 2532 //__ dcbt(R17_tos); // prefetch 2533 if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch); 2534 __ beq(CCR6, Lacquire); // Volatile? 2535 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2536 2537 __ align(32, 12); 2538 __ bind(Lacquire); 2539 __ twi_0(R17_tos); 2540 __ bind(Lisync); 2541 __ isync(); // acquire 2542 2543 #ifdef ASSERT 2544 for (int i = 0; i<number_of_states; ++i) { 2545 assert(branch_table[i], "get initialization"); 2546 //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 2547 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 2548 } 2549 #endif 2550 } 2551 2552 void TemplateTable::getfield(int byte_no) { 2553 getfield_or_static(byte_no, false); 2554 } 2555 2556 void TemplateTable::getstatic(int byte_no) { 2557 getfield_or_static(byte_no, true); 2558 } 2559 2560 // The registers cache and index expected to be set before call. 2561 // The function may destroy various registers, just not the cache and index registers. 2562 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) { 2563 2564 assert_different_registers(Rcache, Rscratch, R6_ARG4); 2565 2566 if (JvmtiExport::can_post_field_modification()) { 2567 Label Lno_field_mod_post; 2568 2569 // Check if post field access in enabled. 2570 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true); 2571 __ lwz(Rscratch, offs, Rscratch); 2572 2573 __ cmpwi(CCR0, Rscratch, 0); 2574 __ beq(CCR0, Lno_field_mod_post); 2575 2576 // Do the post 2577 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2578 const Register Robj = Rscratch; 2579 2580 __ addi(Rcache, Rcache, in_bytes(cp_base_offset)); 2581 if (is_static) { 2582 // Life is simple. Null out the object pointer. 2583 __ li(Robj, 0); 2584 } else { 2585 // In case of the fast versions, value lives in registers => put it back on tos. 2586 int offs = Interpreter::expr_offset_in_bytes(0); 2587 Register base = R15_esp; 2588 switch(bytecode()) { 2589 case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break; 2590 case Bytecodes::_fast_iputfield: // Fall through 2591 case Bytecodes::_fast_bputfield: // Fall through 2592 case Bytecodes::_fast_cputfield: // Fall through 2593 case Bytecodes::_fast_sputfield: __ push_i(); offs+= Interpreter::stackElementSize; break; 2594 case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break; 2595 case Bytecodes::_fast_fputfield: __ push_f(); offs+= Interpreter::stackElementSize; break; 2596 case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break; 2597 default: { 2598 offs = 0; 2599 base = Robj; 2600 const Register Rflags = Robj; 2601 Label is_one_slot; 2602 // Life is harder. The stack holds the value on top, followed by the 2603 // object. We don't know the size of the value, though; it could be 2604 // one or two words depending on its type. As a result, we must find 2605 // the type to determine where the object is. 2606 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian 2607 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2608 2609 __ cmpwi(CCR0, Rflags, ltos); 2610 __ cmpwi(CCR1, Rflags, dtos); 2611 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1)); 2612 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); 2613 __ beq(CCR0, is_one_slot); 2614 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2)); 2615 __ bind(is_one_slot); 2616 break; 2617 } 2618 } 2619 __ ld(Robj, offs, base); 2620 __ verify_oop(Robj); 2621 } 2622 2623 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0)); 2624 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4); 2625 __ get_cache_and_index_at_bcp(Rcache, 1); 2626 2627 // In case of the fast versions, value lives in registers => put it back on tos. 2628 switch(bytecode()) { 2629 case Bytecodes::_fast_aputfield: __ pop_ptr(); break; 2630 case Bytecodes::_fast_iputfield: // Fall through 2631 case Bytecodes::_fast_bputfield: // Fall through 2632 case Bytecodes::_fast_cputfield: // Fall through 2633 case Bytecodes::_fast_sputfield: __ pop_i(); break; 2634 case Bytecodes::_fast_lputfield: __ pop_l(); break; 2635 case Bytecodes::_fast_fputfield: __ pop_f(); break; 2636 case Bytecodes::_fast_dputfield: __ pop_d(); break; 2637 default: break; // Nothin' to do. 2638 } 2639 2640 __ align(32, 12); 2641 __ bind(Lno_field_mod_post); 2642 } 2643 } 2644 2645 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release). 2646 void TemplateTable::putfield_or_static(int byte_no, bool is_static) { 2647 Label Lvolatile; 2648 2649 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2650 Rclass_or_obj = R31, // Needs to survive C call. 2651 Roffset = R22_tmp2, // Needs to survive C call. 2652 Rflags = R3_ARG1, 2653 Rbtable = R4_ARG2, 2654 Rscratch = R11_scratch1, 2655 Rscratch2 = R12_scratch2, 2656 Rscratch3 = R6_ARG4, 2657 Rbc = Rscratch3; 2658 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2659 2660 static address field_branch_table[number_of_states], 2661 static_branch_table[number_of_states]; 2662 2663 address* branch_table = is_static ? static_branch_table : field_branch_table; 2664 2665 // Stack (grows up): 2666 // value 2667 // obj 2668 2669 // Load the field offset. 2670 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2671 jvmti_post_field_mod(Rcache, Rscratch, is_static); 2672 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2673 2674 // Load pointer to branch table. 2675 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2676 2677 // Get volatile flag. 2678 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2679 2680 // Check the field type. 2681 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2682 2683 #ifdef ASSERT 2684 Label LFlagInvalid; 2685 __ cmpldi(CCR0, Rflags, number_of_states); 2686 __ bge(CCR0, LFlagInvalid); 2687 #endif 2688 2689 // Load from branch table and dispatch (volatile case: one instruction ahead). 2690 __ sldi(Rflags, Rflags, LogBytesPerWord); 2691 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile? 2692 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0. 2693 __ ldx(Rbtable, Rbtable, Rflags); 2694 2695 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2696 __ mtctr(Rbtable); 2697 __ bctr(); 2698 2699 #ifdef ASSERT 2700 __ bind(LFlagInvalid); 2701 __ stop("got invalid flag", 0x656); 2702 2703 // __ bind(Lvtos); 2704 address pc_before_release = __ pc(); 2705 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2706 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2707 assert(branch_table[vtos] == 0, "can't compute twice"); 2708 branch_table[vtos] = __ pc(); // non-volatile_entry point 2709 __ stop("vtos unexpected", 0x657); 2710 #endif 2711 2712 __ align(32, 28, 28); // Align pop. 2713 // __ bind(Ldtos); 2714 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2715 assert(branch_table[dtos] == 0, "can't compute twice"); 2716 branch_table[dtos] = __ pc(); // non-volatile_entry point 2717 __ pop(dtos); 2718 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2719 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2720 if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); } 2721 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2722 __ beq(CR_is_vol, Lvolatile); // Volatile? 2723 } 2724 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2725 2726 __ align(32, 28, 28); // Align pop. 2727 // __ bind(Lftos); 2728 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2729 assert(branch_table[ftos] == 0, "can't compute twice"); 2730 branch_table[ftos] = __ pc(); // non-volatile_entry point 2731 __ pop(ftos); 2732 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2733 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2734 if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); } 2735 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2736 __ beq(CR_is_vol, Lvolatile); // Volatile? 2737 } 2738 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2739 2740 __ align(32, 28, 28); // Align pop. 2741 // __ bind(Litos); 2742 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2743 assert(branch_table[itos] == 0, "can't compute twice"); 2744 branch_table[itos] = __ pc(); // non-volatile_entry point 2745 __ pop(itos); 2746 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2747 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2748 if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); } 2749 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2750 __ beq(CR_is_vol, Lvolatile); // Volatile? 2751 } 2752 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2753 2754 __ align(32, 28, 28); // Align pop. 2755 // __ bind(Lltos); 2756 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2757 assert(branch_table[ltos] == 0, "can't compute twice"); 2758 branch_table[ltos] = __ pc(); // non-volatile_entry point 2759 __ pop(ltos); 2760 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2761 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2762 if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); } 2763 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2764 __ beq(CR_is_vol, Lvolatile); // Volatile? 2765 } 2766 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2767 2768 __ align(32, 28, 28); // Align pop. 2769 // __ bind(Lbtos); 2770 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2771 assert(branch_table[btos] == 0, "can't compute twice"); 2772 branch_table[btos] = __ pc(); // non-volatile_entry point 2773 __ pop(btos); 2774 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2775 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2776 if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); } 2777 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2778 __ beq(CR_is_vol, Lvolatile); // Volatile? 2779 } 2780 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2781 2782 __ align(32, 28, 28); // Align pop. 2783 // __ bind(Lctos); 2784 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2785 assert(branch_table[ctos] == 0, "can't compute twice"); 2786 branch_table[ctos] = __ pc(); // non-volatile_entry point 2787 __ pop(ctos); 2788 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.. 2789 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2790 if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); } 2791 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2792 __ beq(CR_is_vol, Lvolatile); // Volatile? 2793 } 2794 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2795 2796 __ align(32, 28, 28); // Align pop. 2797 // __ bind(Lstos); 2798 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2799 assert(branch_table[stos] == 0, "can't compute twice"); 2800 branch_table[stos] = __ pc(); // non-volatile_entry point 2801 __ pop(stos); 2802 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2803 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2804 if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); } 2805 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2806 __ beq(CR_is_vol, Lvolatile); // Volatile? 2807 } 2808 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2809 2810 __ align(32, 28, 28); // Align pop. 2811 // __ bind(Latos); 2812 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2813 assert(branch_table[atos] == 0, "can't compute twice"); 2814 branch_table[atos] = __ pc(); // non-volatile_entry point 2815 __ pop(atos); 2816 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1 2817 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2818 if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); } 2819 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2820 __ beq(CR_is_vol, Lvolatile); // Volatile? 2821 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2822 2823 __ align(32, 12); 2824 __ bind(Lvolatile); 2825 __ fence(); 2826 } 2827 // fallthru: __ b(Lexit); 2828 2829 #ifdef ASSERT 2830 for (int i = 0; i<number_of_states; ++i) { 2831 assert(branch_table[i], "put initialization"); 2832 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 2833 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 2834 } 2835 #endif 2836 } 2837 2838 void TemplateTable::putfield(int byte_no) { 2839 putfield_or_static(byte_no, false); 2840 } 2841 2842 void TemplateTable::putstatic(int byte_no) { 2843 putfield_or_static(byte_no, true); 2844 } 2845 2846 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job. 2847 void TemplateTable::jvmti_post_fast_field_mod() { 2848 __ should_not_reach_here(); 2849 } 2850 2851 void TemplateTable::fast_storefield(TosState state) { 2852 transition(state, vtos); 2853 2854 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2855 Rclass_or_obj = R31, // Needs to survive C call. 2856 Roffset = R22_tmp2, // Needs to survive C call. 2857 Rflags = R3_ARG1, 2858 Rscratch = R11_scratch1, 2859 Rscratch2 = R12_scratch2, 2860 Rscratch3 = R4_ARG2; 2861 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2862 2863 // Constant pool already resolved => Load flags and offset of field. 2864 __ get_cache_and_index_at_bcp(Rcache, 1); 2865 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */); 2866 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2867 2868 // Get the obj and the final store addr. 2869 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2870 2871 // Get volatile flag. 2872 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2873 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); } 2874 { 2875 Label LnotVolatile; 2876 __ beq(CCR0, LnotVolatile); 2877 __ release(); 2878 __ align(32, 12); 2879 __ bind(LnotVolatile); 2880 } 2881 2882 // Do the store and fencing. 2883 switch(bytecode()) { 2884 case Bytecodes::_fast_aputfield: 2885 // Store into the field. 2886 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2887 break; 2888 2889 case Bytecodes::_fast_iputfield: 2890 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2891 break; 2892 2893 case Bytecodes::_fast_lputfield: 2894 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2895 break; 2896 2897 case Bytecodes::_fast_bputfield: 2898 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2899 break; 2900 2901 case Bytecodes::_fast_cputfield: 2902 case Bytecodes::_fast_sputfield: 2903 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2904 break; 2905 2906 case Bytecodes::_fast_fputfield: 2907 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2908 break; 2909 2910 case Bytecodes::_fast_dputfield: 2911 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2912 break; 2913 2914 default: ShouldNotReachHere(); 2915 } 2916 2917 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2918 Label LVolatile; 2919 __ beq(CR_is_vol, LVolatile); 2920 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2921 2922 __ align(32, 12); 2923 __ bind(LVolatile); 2924 __ fence(); 2925 } 2926 } 2927 2928 void TemplateTable::fast_accessfield(TosState state) { 2929 transition(atos, state); 2930 2931 Label LisVolatile; 2932 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2933 2934 const Register Rcache = R3_ARG1, 2935 Rclass_or_obj = R17_tos, 2936 Roffset = R22_tmp2, 2937 Rflags = R23_tmp3, 2938 Rscratch = R12_scratch2; 2939 2940 // Constant pool already resolved. Get the field offset. 2941 __ get_cache_and_index_at_bcp(Rcache, 1); 2942 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2943 2944 // JVMTI support 2945 jvmti_post_field_access(Rcache, Rscratch, false, true); 2946 2947 // Get the load address. 2948 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 2949 2950 // Get volatile flag. 2951 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2952 __ bne(CCR0, LisVolatile); 2953 2954 switch(bytecode()) { 2955 case Bytecodes::_fast_agetfield: 2956 { 2957 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 2958 __ verify_oop(R17_tos); 2959 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2960 2961 __ bind(LisVolatile); 2962 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 2963 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 2964 __ verify_oop(R17_tos); 2965 __ twi_0(R17_tos); 2966 __ isync(); 2967 break; 2968 } 2969 case Bytecodes::_fast_igetfield: 2970 { 2971 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2972 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2973 2974 __ bind(LisVolatile); 2975 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 2976 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2977 __ twi_0(R17_tos); 2978 __ isync(); 2979 break; 2980 } 2981 case Bytecodes::_fast_lgetfield: 2982 { 2983 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2984 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2985 2986 __ bind(LisVolatile); 2987 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 2988 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2989 __ twi_0(R17_tos); 2990 __ isync(); 2991 break; 2992 } 2993 case Bytecodes::_fast_bgetfield: 2994 { 2995 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2996 __ extsb(R17_tos, R17_tos); 2997 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2998 2999 __ bind(LisVolatile); 3000 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3001 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3002 __ twi_0(R17_tos); 3003 __ extsb(R17_tos, R17_tos); 3004 __ isync(); 3005 break; 3006 } 3007 case Bytecodes::_fast_cgetfield: 3008 { 3009 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3010 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3011 3012 __ bind(LisVolatile); 3013 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3014 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3015 __ twi_0(R17_tos); 3016 __ isync(); 3017 break; 3018 } 3019 case Bytecodes::_fast_sgetfield: 3020 { 3021 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3022 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3023 3024 __ bind(LisVolatile); 3025 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3026 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3027 __ twi_0(R17_tos); 3028 __ isync(); 3029 break; 3030 } 3031 case Bytecodes::_fast_fgetfield: 3032 { 3033 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3034 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3035 3036 __ bind(LisVolatile); 3037 Label Ldummy; 3038 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3039 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3040 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3041 __ bne_predict_not_taken(CCR0, Ldummy); 3042 __ bind(Ldummy); 3043 __ isync(); 3044 break; 3045 } 3046 case Bytecodes::_fast_dgetfield: 3047 { 3048 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3049 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3050 3051 __ bind(LisVolatile); 3052 Label Ldummy; 3053 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3054 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3055 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3056 __ bne_predict_not_taken(CCR0, Ldummy); 3057 __ bind(Ldummy); 3058 __ isync(); 3059 break; 3060 } 3061 default: ShouldNotReachHere(); 3062 } 3063 } 3064 3065 void TemplateTable::fast_xaccess(TosState state) { 3066 transition(vtos, state); 3067 3068 Label LisVolatile; 3069 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3070 const Register Rcache = R3_ARG1, 3071 Rclass_or_obj = R17_tos, 3072 Roffset = R22_tmp2, 3073 Rflags = R23_tmp3, 3074 Rscratch = R12_scratch2; 3075 3076 __ ld(Rclass_or_obj, 0, R18_locals); 3077 3078 // Constant pool already resolved. Get the field offset. 3079 __ get_cache_and_index_at_bcp(Rcache, 2); 3080 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3081 3082 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches. 3083 3084 // Needed to report exception at the correct bcp. 3085 __ addi(R14_bcp, R14_bcp, 1); 3086 3087 // Get the load address. 3088 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3089 3090 // Get volatile flag. 3091 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3092 __ bne(CCR0, LisVolatile); 3093 3094 switch(state) { 3095 case atos: 3096 { 3097 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3098 __ verify_oop(R17_tos); 3099 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3100 3101 __ bind(LisVolatile); 3102 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3103 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3104 __ verify_oop(R17_tos); 3105 __ twi_0(R17_tos); 3106 __ isync(); 3107 break; 3108 } 3109 case itos: 3110 { 3111 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3112 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3113 3114 __ bind(LisVolatile); 3115 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3116 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3117 __ twi_0(R17_tos); 3118 __ isync(); 3119 break; 3120 } 3121 case ftos: 3122 { 3123 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3124 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3125 3126 __ bind(LisVolatile); 3127 Label Ldummy; 3128 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3129 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3130 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3131 __ bne_predict_not_taken(CCR0, Ldummy); 3132 __ bind(Ldummy); 3133 __ isync(); 3134 break; 3135 } 3136 default: ShouldNotReachHere(); 3137 } 3138 __ addi(R14_bcp, R14_bcp, -1); 3139 } 3140 3141 // ============================================================================ 3142 // Calls 3143 3144 // Common code for invoke 3145 // 3146 // Input: 3147 // - byte_no 3148 // 3149 // Output: 3150 // - Rmethod: The method to invoke next. 3151 // - Rret_addr: The return address to return to. 3152 // - Rindex: MethodType (invokehandle) or CallSite obj (invokedynamic) 3153 // - Rrecv: Cache for "this" pointer, might be noreg if static call. 3154 // - Rflags: Method flags from const pool cache. 3155 // 3156 // Kills: 3157 // - Rscratch1 3158 // 3159 void TemplateTable::prepare_invoke(int byte_no, 3160 Register Rmethod, // linked method (or i-klass) 3161 Register Rret_addr,// return address 3162 Register Rindex, // itable index, MethodType, etc. 3163 Register Rrecv, // If caller wants to see it. 3164 Register Rflags, // If caller wants to test it. 3165 Register Rscratch 3166 ) { 3167 // Determine flags. 3168 const Bytecodes::Code code = bytecode(); 3169 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 3170 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 3171 const bool is_invokehandle = code == Bytecodes::_invokehandle; 3172 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 3173 const bool is_invokespecial = code == Bytecodes::_invokespecial; 3174 const bool load_receiver = (Rrecv != noreg); 3175 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 3176 3177 assert_different_registers(Rmethod, Rindex, Rflags, Rscratch); 3178 assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch); 3179 assert_different_registers(Rret_addr, Rscratch); 3180 3181 load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic); 3182 3183 // Saving of SP done in call_from_interpreter. 3184 3185 // Maybe push "appendix" to arguments. 3186 if (is_invokedynamic || is_invokehandle) { 3187 Label Ldone; 3188 __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63); 3189 __ beq(CCR0, Ldone); 3190 // Push "appendix" (MethodType, CallSite, etc.). 3191 // This must be done before we get the receiver, 3192 // since the parameter_size includes it. 3193 __ load_resolved_reference_at_index(Rscratch, Rindex); 3194 __ verify_oop(Rscratch); 3195 __ push_ptr(Rscratch); 3196 __ bind(Ldone); 3197 } 3198 3199 // Load receiver if needed (after appendix is pushed so parameter size is correct). 3200 if (load_receiver) { 3201 const Register Rparam_count = Rscratch; 3202 __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask); 3203 __ load_receiver(Rparam_count, Rrecv); 3204 __ verify_oop(Rrecv); 3205 } 3206 3207 // Get return address. 3208 { 3209 Register Rtable_addr = Rscratch; 3210 Register Rret_type = Rret_addr; 3211 address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); 3212 3213 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3214 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3215 __ load_dispatch_table(Rtable_addr, (address*)table_addr); 3216 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3217 // Get return address. 3218 __ ldx(Rret_addr, Rtable_addr, Rret_type); 3219 } 3220 } 3221 3222 // Helper for virtual calls. Load target out of vtable and jump off! 3223 // Kills all passed registers. 3224 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) { 3225 3226 assert_different_registers(Rrecv_klass, Rtemp, Rret); 3227 const Register Rtarget_method = Rindex; 3228 3229 // Get target method & entry point. 3230 const int base = InstanceKlass::vtable_start_offset() * wordSize; 3231 // Calc vtable addr scale the vtable index by 8. 3232 __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize)); 3233 // Load target. 3234 __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes()); 3235 __ ldx(Rtarget_method, Rindex, Rrecv_klass); 3236 // Argument and return type profiling. 3237 __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true); 3238 __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */); 3239 } 3240 3241 // Virtual or final call. Final calls are rewritten on the fly to run through "fast_finalcall" next time. 3242 void TemplateTable::invokevirtual(int byte_no) { 3243 transition(vtos, vtos); 3244 3245 Register Rtable_addr = R11_scratch1, 3246 Rret_type = R12_scratch2, 3247 Rret_addr = R5_ARG3, 3248 Rflags = R22_tmp2, // Should survive C call. 3249 Rrecv = R3_ARG1, 3250 Rrecv_klass = Rrecv, 3251 Rvtableindex_or_method = R31, // Should survive C call. 3252 Rnum_params = R4_ARG2, 3253 Rnew_bc = R6_ARG4; 3254 3255 Label LnotFinal; 3256 3257 load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false); 3258 3259 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift); 3260 __ bfalse(CCR0, LnotFinal); 3261 3262 patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2); 3263 invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2); 3264 3265 __ align(32, 12); 3266 __ bind(LnotFinal); 3267 // Load "this" pointer (receiver). 3268 __ rldicl(Rnum_params, Rflags, 64, 48); 3269 __ load_receiver(Rnum_params, Rrecv); 3270 __ verify_oop(Rrecv); 3271 3272 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3273 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3274 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table()); 3275 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3276 __ ldx(Rret_addr, Rret_type, Rtable_addr); 3277 __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1); 3278 __ load_klass(Rrecv_klass, Rrecv); 3279 __ verify_klass_ptr(Rrecv_klass); 3280 __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false); 3281 3282 generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1); 3283 } 3284 3285 void TemplateTable::fast_invokevfinal(int byte_no) { 3286 transition(vtos, vtos); 3287 3288 assert(byte_no == f2_byte, "use this argument"); 3289 Register Rflags = R22_tmp2, 3290 Rmethod = R31; 3291 load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false); 3292 invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2); 3293 } 3294 3295 void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) { 3296 3297 assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2); 3298 3299 // Load receiver from stack slot. 3300 Register Rrecv = Rscratch2; 3301 Register Rnum_params = Rrecv; 3302 3303 __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod); 3304 __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params); 3305 3306 // Get return address. 3307 Register Rtable_addr = Rscratch1, 3308 Rret_addr = Rflags, 3309 Rret_type = Rret_addr; 3310 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3311 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3312 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table()); 3313 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3314 __ ldx(Rret_addr, Rret_type, Rtable_addr); 3315 3316 // Load receiver and receiver NULL check. 3317 __ load_receiver(Rnum_params, Rrecv); 3318 __ null_check_throw(Rrecv, -1, Rscratch1); 3319 3320 __ profile_final_call(Rrecv, Rscratch1); 3321 // Argument and return type profiling. 3322 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true); 3323 3324 // Do the call. 3325 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2); 3326 } 3327 3328 void TemplateTable::invokespecial(int byte_no) { 3329 assert(byte_no == f1_byte, "use this argument"); 3330 transition(vtos, vtos); 3331 3332 Register Rtable_addr = R3_ARG1, 3333 Rret_addr = R4_ARG2, 3334 Rflags = R5_ARG3, 3335 Rreceiver = R6_ARG4, 3336 Rmethod = R31; 3337 3338 prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1); 3339 3340 // Receiver NULL check. 3341 __ null_check_throw(Rreceiver, -1, R11_scratch1); 3342 3343 __ profile_call(R11_scratch1, R12_scratch2); 3344 // Argument and return type profiling. 3345 __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false); 3346 __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2); 3347 } 3348 3349 void TemplateTable::invokestatic(int byte_no) { 3350 assert(byte_no == f1_byte, "use this argument"); 3351 transition(vtos, vtos); 3352 3353 Register Rtable_addr = R3_ARG1, 3354 Rret_addr = R4_ARG2, 3355 Rflags = R5_ARG3; 3356 3357 prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1); 3358 3359 __ profile_call(R11_scratch1, R12_scratch2); 3360 // Argument and return type profiling. 3361 __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false); 3362 __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2); 3363 } 3364 3365 void TemplateTable::invokeinterface_object_method(Register Rrecv_klass, 3366 Register Rret, 3367 Register Rflags, 3368 Register Rindex, 3369 Register Rtemp1, 3370 Register Rtemp2) { 3371 3372 assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2); 3373 Label LnotFinal; 3374 3375 // Check for vfinal. 3376 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift); 3377 __ bfalse(CCR0, LnotFinal); 3378 3379 Register Rscratch = Rflags; // Rflags is dead now. 3380 3381 // Final call case. 3382 __ profile_final_call(Rtemp1, Rscratch); 3383 // Argument and return type profiling. 3384 __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true); 3385 // Do the final call - the index (f2) contains the method. 3386 __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */); 3387 3388 // Non-final callc case. 3389 __ bind(LnotFinal); 3390 __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false); 3391 generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch); 3392 } 3393 3394 void TemplateTable::invokeinterface(int byte_no) { 3395 assert(byte_no == f1_byte, "use this argument"); 3396 transition(vtos, vtos); 3397 3398 const Register Rscratch1 = R11_scratch1, 3399 Rscratch2 = R12_scratch2, 3400 Rscratch3 = R9_ARG7, 3401 Rscratch4 = R10_ARG8, 3402 Rtable_addr = Rscratch2, 3403 Rinterface_klass = R5_ARG3, 3404 Rret_type = R8_ARG6, 3405 Rret_addr = Rret_type, 3406 Rindex = R6_ARG4, 3407 Rreceiver = R4_ARG2, 3408 Rrecv_klass = Rreceiver, 3409 Rflags = R7_ARG5; 3410 3411 prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1); 3412 3413 // Get receiver klass. 3414 __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3); 3415 __ load_klass(Rrecv_klass, Rreceiver); 3416 3417 // Check corner case object method. 3418 Label LobjectMethod; 3419 3420 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift); 3421 __ btrue(CCR0, LobjectMethod); 3422 3423 // Fallthrough: The normal invokeinterface case. 3424 __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false); 3425 3426 // Find entry point to call. 3427 Label Lthrow_icc, Lthrow_ame; 3428 // Result will be returned in Rindex. 3429 __ mr(Rscratch4, Rrecv_klass); 3430 __ mr(Rscratch3, Rindex); 3431 __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc); 3432 3433 __ cmpdi(CCR0, Rindex, 0); 3434 __ beq(CCR0, Lthrow_ame); 3435 // Found entry. Jump off! 3436 // Argument and return type profiling. 3437 __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true); 3438 __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2); 3439 3440 // Vtable entry was NULL => Throw abstract method error. 3441 __ bind(Lthrow_ame); 3442 __ mr(Rrecv_klass, Rscratch4); 3443 __ mr(Rindex, Rscratch3); 3444 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); 3445 3446 // Interface was not found => Throw incompatible class change error. 3447 __ bind(Lthrow_icc); 3448 __ mr(Rrecv_klass, Rscratch4); 3449 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError)); 3450 3451 __ should_not_reach_here(); 3452 3453 // Special case of invokeinterface called for virtual method of 3454 // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details: 3455 // The invokeinterface was rewritten to a invokevirtual, hence we have 3456 // to handle this corner case. This code isn't produced by javac, but could 3457 // be produced by another compliant java compiler. 3458 __ bind(LobjectMethod); 3459 invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2); 3460 } 3461 3462 void TemplateTable::invokedynamic(int byte_no) { 3463 transition(vtos, vtos); 3464 3465 const Register Rret_addr = R3_ARG1, 3466 Rflags = R4_ARG2, 3467 Rmethod = R22_tmp2, 3468 Rscratch1 = R11_scratch1, 3469 Rscratch2 = R12_scratch2; 3470 3471 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2); 3472 3473 // Profile this call. 3474 __ profile_call(Rscratch1, Rscratch2); 3475 3476 // Off we go. With the new method handles, we don't jump to a method handle 3477 // entry any more. Instead, we pushed an "appendix" in prepare invoke, which happens 3478 // to be the callsite object the bootstrap method returned. This is passed to a 3479 // "link" method which does the dispatch (Most likely just grabs the MH stored 3480 // inside the callsite and does an invokehandle). 3481 // Argument and return type profiling. 3482 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false); 3483 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */); 3484 } 3485 3486 void TemplateTable::invokehandle(int byte_no) { 3487 transition(vtos, vtos); 3488 3489 const Register Rret_addr = R3_ARG1, 3490 Rflags = R4_ARG2, 3491 Rrecv = R5_ARG3, 3492 Rmethod = R22_tmp2, 3493 Rscratch1 = R11_scratch1, 3494 Rscratch2 = R12_scratch2; 3495 3496 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2); 3497 __ verify_method_ptr(Rmethod); 3498 __ null_check_throw(Rrecv, -1, Rscratch2); 3499 3500 __ profile_final_call(Rrecv, Rscratch1); 3501 3502 // Still no call from handle => We call the method handle interpreter here. 3503 // Argument and return type profiling. 3504 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true); 3505 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */); 3506 } 3507 3508 // ============================================================================= 3509 // Allocation 3510 3511 // Puts allocated obj ref onto the expression stack. 3512 void TemplateTable::_new() { 3513 transition(vtos, atos); 3514 3515 Label Lslow_case, 3516 Ldone, 3517 Linitialize_header, 3518 Lallocate_shared, 3519 Linitialize_object; // Including clearing the fields. 3520 3521 const Register RallocatedObject = R17_tos, 3522 RinstanceKlass = R9_ARG7, 3523 Rscratch = R11_scratch1, 3524 Roffset = R8_ARG6, 3525 Rinstance_size = Roffset, 3526 Rcpool = R4_ARG2, 3527 Rtags = R3_ARG1, 3528 Rindex = R5_ARG3; 3529 3530 const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc(); 3531 3532 // -------------------------------------------------------------------------- 3533 // Check if fast case is possible. 3534 3535 // Load pointers to const pool and const pool's tags array. 3536 __ get_cpool_and_tags(Rcpool, Rtags); 3537 // Load index of constant pool entry. 3538 __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned); 3539 3540 if (UseTLAB) { 3541 // Make sure the class we're about to instantiate has been resolved 3542 // This is done before loading instanceKlass to be consistent with the order 3543 // how Constant Pool is updated (see ConstantPoolCache::klass_at_put). 3544 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes()); 3545 __ lbzx(Rtags, Rindex, Rtags); 3546 3547 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class); 3548 __ bne(CCR0, Lslow_case); 3549 3550 // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord). 3551 __ sldi(Roffset, Rindex, LogBytesPerWord); 3552 __ addi(Rscratch, Rcpool, sizeof(ConstantPool)); 3553 __ isync(); // Order load of instance Klass wrt. tags. 3554 __ ldx(RinstanceKlass, Roffset, Rscratch); 3555 3556 // Make sure klass is fully initialized and get instance_size. 3557 __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass); 3558 __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass); 3559 3560 __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized); 3561 // Make sure klass does not have has_finalizer, or is abstract, or interface or java/lang/Class. 3562 __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0? 3563 3564 __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized? 3565 __ beq(CCR0, Lslow_case); 3566 3567 // -------------------------------------------------------------------------- 3568 // Fast case: 3569 // Allocate the instance. 3570 // 1) Try to allocate in the TLAB. 3571 // 2) If fail, and the TLAB is not full enough to discard, allocate in the shared Eden. 3572 // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.). 3573 3574 Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits. 3575 Register RnewTopValue = R6_ARG4; 3576 Register RendValue = R7_ARG5; 3577 3578 // Check if we can allocate in the TLAB. 3579 __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread); 3580 __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread); 3581 3582 __ add(RnewTopValue, Rinstance_size, RoldTopValue); 3583 3584 // If there is enough space, we do not CAS and do not clear. 3585 __ cmpld(CCR0, RnewTopValue, RendValue); 3586 __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case); 3587 3588 __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread); 3589 3590 if (ZeroTLAB) { 3591 // The fields have already been cleared. 3592 __ b(Linitialize_header); 3593 } else { 3594 // Initialize both the header and fields. 3595 __ b(Linitialize_object); 3596 } 3597 3598 // Fall through: TLAB was too small. 3599 if (allow_shared_alloc) { 3600 Register RtlabWasteLimitValue = R10_ARG8; 3601 Register RfreeValue = RnewTopValue; 3602 3603 __ bind(Lallocate_shared); 3604 // Check if tlab should be discarded (refill_waste_limit >= free). 3605 __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread); 3606 __ subf(RfreeValue, RoldTopValue, RendValue); 3607 __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords 3608 __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue); 3609 __ bge(CCR0, Lslow_case); 3610 3611 // Increment waste limit to prevent getting stuck on this slow path. 3612 __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment()); 3613 __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread); 3614 } 3615 // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case); 3616 } 3617 // else: Always go the slow path. 3618 3619 // -------------------------------------------------------------------------- 3620 // slow case 3621 __ bind(Lslow_case); 3622 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex); 3623 3624 if (UseTLAB) { 3625 __ b(Ldone); 3626 // -------------------------------------------------------------------------- 3627 // Init1: Zero out newly allocated memory. 3628 3629 if (!ZeroTLAB || allow_shared_alloc) { 3630 // Clear object fields. 3631 __ bind(Linitialize_object); 3632 3633 // Initialize remaining object fields. 3634 Register Rbase = Rtags; 3635 __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc)); 3636 __ addi(Rbase, RallocatedObject, sizeof(oopDesc)); 3637 __ srdi(Rinstance_size, Rinstance_size, 3); 3638 3639 // Clear out object skipping header. Takes also care of the zero length case. 3640 __ clear_memory_doubleword(Rbase, Rinstance_size); 3641 // fallthru: __ b(Linitialize_header); 3642 } 3643 3644 // -------------------------------------------------------------------------- 3645 // Init2: Initialize the header: mark, klass 3646 __ bind(Linitialize_header); 3647 3648 // Init mark. 3649 if (UseBiasedLocking) { 3650 __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass); 3651 } else { 3652 __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0); 3653 } 3654 __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject); 3655 3656 // Init klass. 3657 __ store_klass_gap(RallocatedObject); 3658 __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms) 3659 3660 // Check and trigger dtrace event. 3661 { 3662 SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes); 3663 __ push(atos); 3664 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)); 3665 __ pop(atos); 3666 } 3667 } 3668 3669 // continue 3670 __ bind(Ldone); 3671 3672 // Must prevent reordering of stores for object initialization with stores that publish the new object. 3673 __ membar(Assembler::StoreStore); 3674 } 3675 3676 void TemplateTable::newarray() { 3677 transition(itos, atos); 3678 3679 __ lbz(R4, 1, R14_bcp); 3680 __ extsw(R5, R17_tos); 3681 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */); 3682 3683 // Must prevent reordering of stores for object initialization with stores that publish the new object. 3684 __ membar(Assembler::StoreStore); 3685 } 3686 3687 void TemplateTable::anewarray() { 3688 transition(itos, atos); 3689 3690 __ get_constant_pool(R4); 3691 __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned); 3692 __ extsw(R6, R17_tos); // size 3693 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */); 3694 3695 // Must prevent reordering of stores for object initialization with stores that publish the new object. 3696 __ membar(Assembler::StoreStore); 3697 } 3698 3699 // Allocate a multi dimensional array 3700 void TemplateTable::multianewarray() { 3701 transition(vtos, atos); 3702 3703 Register Rptr = R31; // Needs to survive C call. 3704 3705 // Put ndims * wordSize into frame temp slot 3706 __ lbz(Rptr, 3, R14_bcp); 3707 __ sldi(Rptr, Rptr, Interpreter::logStackElementSize); 3708 // Esp points past last_dim, so set to R4 to first_dim address. 3709 __ add(R4, Rptr, R15_esp); 3710 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */); 3711 // Pop all dimensions off the stack. 3712 __ add(R15_esp, Rptr, R15_esp); 3713 3714 // Must prevent reordering of stores for object initialization with stores that publish the new object. 3715 __ membar(Assembler::StoreStore); 3716 } 3717 3718 void TemplateTable::arraylength() { 3719 transition(atos, itos); 3720 3721 Label LnoException; 3722 __ verify_oop(R17_tos); 3723 __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1); 3724 __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos); 3725 } 3726 3727 // ============================================================================ 3728 // Typechecks 3729 3730 void TemplateTable::checkcast() { 3731 transition(atos, atos); 3732 3733 Label Ldone, Lis_null, Lquicked, Lresolved; 3734 Register Roffset = R6_ARG4, 3735 RobjKlass = R4_ARG2, 3736 RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register. 3737 Rcpool = R11_scratch1, 3738 Rtags = R12_scratch2; 3739 3740 // Null does not pass. 3741 __ cmpdi(CCR0, R17_tos, 0); 3742 __ beq(CCR0, Lis_null); 3743 3744 // Get constant pool tag to find out if the bytecode has already been "quickened". 3745 __ get_cpool_and_tags(Rcpool, Rtags); 3746 3747 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned); 3748 3749 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes()); 3750 __ lbzx(Rtags, Rtags, Roffset); 3751 3752 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class); 3753 __ beq(CCR0, Lquicked); 3754 3755 // Call into the VM to "quicken" instanceof. 3756 __ push_ptr(); // for GC 3757 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 3758 __ get_vm_result_2(RspecifiedKlass); 3759 __ pop_ptr(); // Restore receiver. 3760 __ b(Lresolved); 3761 3762 // Extract target class from constant pool. 3763 __ bind(Lquicked); 3764 __ sldi(Roffset, Roffset, LogBytesPerWord); 3765 __ addi(Rcpool, Rcpool, sizeof(ConstantPool)); 3766 __ isync(); // Order load of specified Klass wrt. tags. 3767 __ ldx(RspecifiedKlass, Rcpool, Roffset); 3768 3769 // Do the checkcast. 3770 __ bind(Lresolved); 3771 // Get value klass in RobjKlass. 3772 __ load_klass(RobjKlass, R17_tos); 3773 // Generate a fast subtype check. Branch to cast_ok if no failure. Return 0 if failure. 3774 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone); 3775 3776 // Not a subtype; so must throw exception 3777 // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention. 3778 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry); 3779 __ mtctr(R11_scratch1); 3780 __ bctr(); 3781 3782 // Profile the null case. 3783 __ align(32, 12); 3784 __ bind(Lis_null); 3785 __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch. 3786 3787 __ align(32, 12); 3788 __ bind(Ldone); 3789 } 3790 3791 // Output: 3792 // - tos == 0: Obj was null or not an instance of class. 3793 // - tos == 1: Obj was an instance of class. 3794 void TemplateTable::instanceof() { 3795 transition(atos, itos); 3796 3797 Label Ldone, Lis_null, Lquicked, Lresolved; 3798 Register Roffset = R5_ARG3, 3799 RobjKlass = R4_ARG2, 3800 RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect the value in this register. 3801 Rcpool = R11_scratch1, 3802 Rtags = R12_scratch2; 3803 3804 // Null does not pass. 3805 __ cmpdi(CCR0, R17_tos, 0); 3806 __ beq(CCR0, Lis_null); 3807 3808 // Get constant pool tag to find out if the bytecode has already been "quickened". 3809 __ get_cpool_and_tags(Rcpool, Rtags); 3810 3811 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned); 3812 3813 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes()); 3814 __ lbzx(Rtags, Rtags, Roffset); 3815 3816 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class); 3817 __ beq(CCR0, Lquicked); 3818 3819 // Call into the VM to "quicken" instanceof. 3820 __ push_ptr(); // for GC 3821 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 3822 __ get_vm_result_2(RspecifiedKlass); 3823 __ pop_ptr(); // Restore receiver. 3824 __ b(Lresolved); 3825 3826 // Extract target class from constant pool. 3827 __ bind(Lquicked); 3828 __ sldi(Roffset, Roffset, LogBytesPerWord); 3829 __ addi(Rcpool, Rcpool, sizeof(ConstantPool)); 3830 __ isync(); // Order load of specified Klass wrt. tags. 3831 __ ldx(RspecifiedKlass, Rcpool, Roffset); 3832 3833 // Do the checkcast. 3834 __ bind(Lresolved); 3835 // Get value klass in RobjKlass. 3836 __ load_klass(RobjKlass, R17_tos); 3837 // Generate a fast subtype check. Branch to cast_ok if no failure. Return 0 if failure. 3838 __ li(R17_tos, 1); 3839 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone); 3840 __ li(R17_tos, 0); 3841 3842 if (ProfileInterpreter) { 3843 __ b(Ldone); 3844 } 3845 3846 // Profile the null case. 3847 __ align(32, 12); 3848 __ bind(Lis_null); 3849 __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch. 3850 3851 __ align(32, 12); 3852 __ bind(Ldone); 3853 } 3854 3855 // ============================================================================= 3856 // Breakpoints 3857 3858 void TemplateTable::_breakpoint() { 3859 transition(vtos, vtos); 3860 3861 // Get the unpatched byte code. 3862 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp); 3863 __ mr(R31, R3_RET); 3864 3865 // Post the breakpoint event. 3866 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp); 3867 3868 // Complete the execution of original bytecode. 3869 __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos)); 3870 } 3871 3872 // ============================================================================= 3873 // Exceptions 3874 3875 void TemplateTable::athrow() { 3876 transition(atos, vtos); 3877 3878 // Exception oop is in tos 3879 __ verify_oop(R17_tos); 3880 3881 __ null_check_throw(R17_tos, -1, R11_scratch1); 3882 3883 // Throw exception interpreter entry expects exception oop to be in R3. 3884 __ mr(R3_RET, R17_tos); 3885 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry()); 3886 __ mtctr(R11_scratch1); 3887 __ bctr(); 3888 } 3889 3890 // ============================================================================= 3891 // Synchronization 3892 // Searches the basic object lock list on the stack for a free slot 3893 // and uses it to lock the obect in tos. 3894 // 3895 // Recursive locking is enabled by exiting the search if the same 3896 // object is already found in the list. Thus, a new basic lock obj lock 3897 // is allocated "higher up" in the stack and thus is found first 3898 // at next monitor exit. 3899 void TemplateTable::monitorenter() { 3900 transition(atos, vtos); 3901 3902 __ verify_oop(R17_tos); 3903 3904 Register Rcurrent_monitor = R11_scratch1, 3905 Rcurrent_obj = R12_scratch2, 3906 Robj_to_lock = R17_tos, 3907 Rscratch1 = R3_ARG1, 3908 Rscratch2 = R4_ARG2, 3909 Rscratch3 = R5_ARG3, 3910 Rcurrent_obj_addr = R6_ARG4; 3911 3912 // ------------------------------------------------------------------------------ 3913 // Null pointer exception. 3914 __ null_check_throw(Robj_to_lock, -1, R11_scratch1); 3915 3916 // Try to acquire a lock on the object. 3917 // Repeat until succeeded (i.e., until monitorenter returns true). 3918 3919 // ------------------------------------------------------------------------------ 3920 // Find a free slot in the monitor block. 3921 Label Lfound, Lexit, Lallocate_new; 3922 ConditionRegister found_free_slot = CCR0, 3923 found_same_obj = CCR1, 3924 reached_limit = CCR6; 3925 { 3926 Label Lloop, Lentry; 3927 Register Rlimit = Rcurrent_monitor; 3928 3929 // Set up search loop - start with topmost monitor. 3930 __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor); 3931 3932 __ ld(Rlimit, 0, R1_SP); 3933 __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base 3934 3935 // Check if any slot is present => short cut to allocation if not. 3936 __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit); 3937 __ bgt(reached_limit, Lallocate_new); 3938 3939 // Pre-load topmost slot. 3940 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr); 3941 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize); 3942 // The search loop. 3943 __ bind(Lloop); 3944 // Found free slot? 3945 __ cmpdi(found_free_slot, Rcurrent_obj, 0); 3946 // Is this entry for same obj? If so, stop the search and take the found 3947 // free slot or allocate a new one to enable recursive locking. 3948 __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock); 3949 __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit); 3950 __ beq(found_free_slot, Lexit); 3951 __ beq(found_same_obj, Lallocate_new); 3952 __ bgt(reached_limit, Lallocate_new); 3953 // Check if last allocated BasicLockObj reached. 3954 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr); 3955 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize); 3956 // Next iteration if unchecked BasicObjectLocks exist on the stack. 3957 __ b(Lloop); 3958 } 3959 3960 // ------------------------------------------------------------------------------ 3961 // Check if we found a free slot. 3962 __ bind(Lexit); 3963 3964 __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes()); 3965 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize); 3966 __ b(Lfound); 3967 3968 // We didn't find a free BasicObjLock => allocate one. 3969 __ align(32, 12); 3970 __ bind(Lallocate_new); 3971 __ add_monitor_to_stack(false, Rscratch1, Rscratch2); 3972 __ mr(Rcurrent_monitor, R26_monitor); 3973 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes()); 3974 3975 // ------------------------------------------------------------------------------ 3976 // We now have a slot to lock. 3977 __ bind(Lfound); 3978 3979 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly. 3980 // The object has already been poped from the stack, so the expression stack looks correct. 3981 __ addi(R14_bcp, R14_bcp, 1); 3982 3983 __ std(Robj_to_lock, 0, Rcurrent_obj_addr); 3984 __ lock_object(Rcurrent_monitor, Robj_to_lock); 3985 3986 // Check if there's enough space on the stack for the monitors after locking. 3987 Label Lskip_stack_check; 3988 // Optimization: If the monitors stack section is less then a std page size (4K) don't run 3989 // the stack check. There should be enough shadow pages to fit that in. 3990 __ ld(Rscratch3, 0, R1_SP); 3991 __ sub(Rscratch3, Rscratch3, R26_monitor); 3992 __ cmpdi(CCR0, Rscratch3, 4*K); 3993 __ blt(CCR0, Lskip_stack_check); 3994 3995 DEBUG_ONLY(__ untested("stack overflow check during monitor enter");) 3996 __ li(Rscratch1, 0); 3997 __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2); 3998 3999 __ align(32, 12); 4000 __ bind(Lskip_stack_check); 4001 4002 // The bcp has already been incremented. Just need to dispatch to next instruction. 4003 __ dispatch_next(vtos); 4004 } 4005 4006 void TemplateTable::monitorexit() { 4007 transition(atos, vtos); 4008 __ verify_oop(R17_tos); 4009 4010 Register Rcurrent_monitor = R11_scratch1, 4011 Rcurrent_obj = R12_scratch2, 4012 Robj_to_lock = R17_tos, 4013 Rcurrent_obj_addr = R3_ARG1, 4014 Rlimit = R4_ARG2; 4015 Label Lfound, Lillegal_monitor_state; 4016 4017 // Check corner case: unbalanced monitorEnter / Exit. 4018 __ ld(Rlimit, 0, R1_SP); 4019 __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base 4020 4021 // Null pointer check. 4022 __ null_check_throw(Robj_to_lock, -1, R11_scratch1); 4023 4024 __ cmpld(CCR0, R26_monitor, Rlimit); 4025 __ bgt(CCR0, Lillegal_monitor_state); 4026 4027 // Find the corresponding slot in the monitors stack section. 4028 { 4029 Label Lloop; 4030 4031 // Start with topmost monitor. 4032 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes()); 4033 __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes()); 4034 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr); 4035 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize); 4036 4037 __ bind(Lloop); 4038 // Is this entry for same obj? 4039 __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock); 4040 __ beq(CCR0, Lfound); 4041 4042 // Check if last allocated BasicLockObj reached. 4043 4044 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr); 4045 __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit); 4046 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize); 4047 4048 // Next iteration if unchecked BasicObjectLocks exist on the stack. 4049 __ ble(CCR0, Lloop); 4050 } 4051 4052 // Fell through without finding the basic obj lock => throw up! 4053 __ bind(Lillegal_monitor_state); 4054 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); 4055 __ should_not_reach_here(); 4056 4057 __ align(32, 12); 4058 __ bind(Lfound); 4059 __ addi(Rcurrent_monitor, Rcurrent_obj_addr, 4060 -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes()); 4061 __ unlock_object(Rcurrent_monitor); 4062 } 4063 4064 // ============================================================================ 4065 // Wide bytecodes 4066 4067 // Wide instructions. Simply redirects to the wide entry point for that instruction. 4068 void TemplateTable::wide() { 4069 transition(vtos, vtos); 4070 4071 const Register Rtable = R11_scratch1, 4072 Rindex = R12_scratch2, 4073 Rtmp = R0; 4074 4075 __ lbz(Rindex, 1, R14_bcp); 4076 4077 __ load_dispatch_table(Rtable, Interpreter::_wentry_point); 4078 4079 __ slwi(Rindex, Rindex, LogBytesPerWord); 4080 __ ldx(Rtmp, Rtable, Rindex); 4081 __ mtctr(Rtmp); 4082 __ bctr(); 4083 // Note: the bcp increment step is part of the individual wide bytecode implementations. 4084 } 4085 #endif // !CC_INTERP