/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants is possible at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,   // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
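          // Both ways to get here store a null: either the caller passed
          // noreg (statically null) or the runtime check above found Rval
          // to be null. No post barrier / card mark is needed for a null
          // store, since it cannot create an inter-region reference; the
          // SATB pre-barrier above has already recorded the old value.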
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
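      // The resolved bytecode lives in one byte of the cache entry's
      // _indices field; until resolution stores it there, that byte reads
      // as zero, which is what the check below relies on.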
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
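  // Push the byte operand at bcp + 1, sign-extended to an int.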
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label is_null;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null);
  __ verify_oop(R17_tos);
  __ dispatch_epilog(atos, Bytecodes::length_for(bytecode()));

  __ bind(is_null);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
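  // The VM entry resolves the constant, installs it in the resolved
  // references array, and returns the oop in R17_tos.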
  __ call_VM(R17_tos, entry, R3_ARG1);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
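    // Example: for "iload A; iload B; iload C; istore" the first pass
    // rewrites only C (its successor is not an iload/caload), making it
    // _fast_iload. A later pass then sees B's successor is _fast_iload
    // and rewrites B to _fast_iload2, which loads both B and C.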
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable of type long from the locals area to the TOS cache register.
// The local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because Lbcp points to wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.
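  // The wide prefix supplies a 2-byte unsigned local index; bcp still
  // points at the wide opcode itself, so the index is read at bcp + 2.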
  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
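// This template is installed by the iload rewrite above when
// RewriteFrequentPairs is on.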
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
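    // A plain _getfield has not been resolved yet; only after resolution
    // is it quickened to a _fast_Xgetfield, at which point a later pass
    // through this aload_0 can still fuse the pair.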
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack and...
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31;  // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
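  // The operands were read in place via esp-relative loads above, so
  // nothing has been popped yet; drop all three stack slots at once.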
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);      // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);  // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // For shift operations:
  //   tos      = number of bits to shift
  //   Rscratch = value to shift
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:  __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
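    // Note: long shifts are not handled here; lshl/lshr/lushr are
    // separate templates because their shift count on the stack is an int.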
    default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  // Divisor in {-1, 0, 1}? The unsigned compare of divisor+1 against 2
  // detects all three special cases at once.
  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide min_long/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
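  // Per the JVM spec, a 64-bit shift uses only the low 6 bits of the
  // count (count & 0x3f), which is what the rldicl above extracts.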
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);           // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.
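  // Rindex now holds the local's address, so the incremented value can be
  // stored back in place without recomputing the address.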
  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default: ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default: ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
      // fall through: the sign-extended int is converted like a long
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
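        // fcfid followed by frsp rounds twice (long->double, then
        // double->float), which for some values differs from the single
        // correctly rounded long->float result, so delegate to the runtime.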
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NaN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NaN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
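  // Overview: compute the signed displacement from the bytecode stream,
  // handle JSR separately, and for backward branches bump the backedge
  // counter (possibly triggering OSR compilation) before dispatching.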
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in Otos_i.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    //__ unimplemented("branch invocation counter");

    Label Lforward;
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ load_const_optimized(Rscratch3, mask, R0);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
        __ b(Loverflow);
      }

      // If there's no MDO, increment counter in method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ load_const_optimized(Rscratch3, mask, R0);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters); // Store back to the MethodCounters the value was read from.
      __ and_(Rscratch3, Rscratch2, Rscratch3);
      __ bne(CCR0, Lforward);

      __ bind(Loverflow);

      // Notify point for loop, pass branch bytecode.
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);

      // Was an OSR adapter generated?
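      // (frequency_counter_overflow returns the osr nmethod if an
      // OSR-compiled version of the method is ready, NULL otherwise.)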
      // R3_RET = osr nmethod
      __ cmpdi(CCR0, R3_RET, 0);
      __ beq(CCR0, Lforward);

      // Has the nmethod been invalidated already?
      __ lbz(R0, nmethod::state_offset(), R3_RET);
      __ cmpwi(CCR0, R0, nmethod::in_use);
      __ bne(CCR0, Lforward);

      // Migrate the interpreter frame off of the stack.
      // We can use all registers because we will not return to interpreter from this point.

      // Save nmethod.
      const Register osr_nmethod = R31;
      __ mr(osr_nmethod, R3_RET);
      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
      __ reset_last_Java_frame();
      // OSR buffer is in ARG1.

      // Remove the interpreter frame.
      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

      // Jump to the osr code.
      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
      __ mtlr(R0);
      __ mtctr(R11_scratch1);
      __ bctr();

    } else {

      const Register invoke_ctr = Rscratch1;
      // Update Backedge branch separately from invocations.
      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);

      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
        }
      }
    }

    __ bind(Lforward);

  } else {
    // Bump bytecode pointer by displacement (take the branch).
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
  }
  // Continue with bytecode @ target.
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only.
  __ dispatch_next(vtos);
}

// Helper function for if_cmp* methods below.
// Factored out common compare and branch code.
void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
  Label Lnot_taken;
  // Note: cc is the condition under which the bytecode branch is *taken*.
  // The hardware branch below jumps to Lnot_taken, so it has to use the
  // inverted condition.

  if (is_jint) {
    if (cmp0) {
      __ cmpwi(CCR0, Rfirst, 0);
    } else {
      __ cmpw(CCR0, Rfirst, Rsecond);
    }
  } else {
    if (cmp0) {
      __ cmpdi(CCR0, Rfirst, 0);
    } else {
      __ cmpd(CCR0, Rfirst, Rsecond);
    }
  }
  branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);

  // Condition holds => take the branch.
  branch(false, false);

  // Condition does not hold => continue with the next bytecode.
  __ align(32, 12);
  __ bind(Lnot_taken);
  __ profile_not_taken_branch(Rscratch1, Rscratch2);
}

// Compare integer value with zero and branch to the target if CC holds,
// fall through to the next bytecode otherwise.
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
}

// Compare integer values and branch to the target if CC holds,
// fall through to the next bytecode otherwise.
1763 // 1764 // Interface: 1765 // - Rfirst: First operand (older stack value) 1766 // - tos: Second operand (younger stack value) 1767 void TemplateTable::if_icmp(Condition cc) { 1768 transition(itos, vtos); 1769 1770 const Register Rfirst = R0, 1771 Rsecond = R17_tos; 1772 1773 __ pop_i(Rfirst); 1774 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false); 1775 } 1776 1777 void TemplateTable::if_nullcmp(Condition cc) { 1778 transition(atos, vtos); 1779 1780 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true); 1781 } 1782 1783 void TemplateTable::if_acmp(Condition cc) { 1784 transition(atos, vtos); 1785 1786 const Register Rfirst = R0, 1787 Rsecond = R17_tos; 1788 1789 __ pop_ptr(Rfirst); 1790 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false); 1791 } 1792 1793 void TemplateTable::ret() { 1794 locals_index(R11_scratch1); 1795 __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1); 1796 1797 __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2); 1798 1799 __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method); 1800 __ add(R11_scratch1, R17_tos, R11_scratch1); 1801 __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset())); 1802 __ dispatch_next(vtos); 1803 } 1804 1805 void TemplateTable::wide_ret() { 1806 transition(vtos, vtos); 1807 1808 const Register Rindex = R3_ARG1, 1809 Rscratch1 = R11_scratch1, 1810 Rscratch2 = R12_scratch2; 1811 1812 locals_index_wide(Rindex); 1813 __ load_local_ptr(R17_tos, R17_tos, Rindex); 1814 __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2); 1815 // Tos now contains the bci, compute the bcp from that. 1816 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method); 1817 __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset())); 1818 __ add(R14_bcp, Rscratch1, Rscratch2); 1819 __ dispatch_next(vtos); 1820 } 1821 1822 void TemplateTable::tableswitch() { 1823 transition(itos, vtos); 1824 1825 Label Ldispatch, Ldefault_case; 1826 Register Rlow_byte = R3_ARG1, 1827 Rindex = Rlow_byte, 1828 Rhigh_byte = R4_ARG2, 1829 Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset 1830 Rscratch1 = R11_scratch1, 1831 Rscratch2 = R12_scratch2, 1832 Roffset = R6_ARG4; 1833 1834 // Align bcp. 1835 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1836 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1837 1838 // Load lo & hi. 1839 __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1840 __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned); 1841 1842 // Check for default case (=index outside [low,high]). 1843 __ cmpw(CCR0, R17_tos, Rlow_byte); 1844 __ cmpw(CCR1, R17_tos, Rhigh_byte); 1845 __ blt(CCR0, Ldefault_case); 1846 __ bgt(CCR1, Ldefault_case); 1847 1848 // Lookup dispatch offset. 
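  // A rough sketch of the lookup below, assuming the aligned stream layout
  // default (4) | low (4) | high (4) | offsets[high - low + 1]:
  //
  //   jint index  = key - low;               // 0-based position in the table
  //   jint offset = aligned_bcp[3 + index];  // skip the default/low/high words
  //
  // The sub/extsw pair computes the index; sldi/addi scale and bias it so the
  // indexed load (byte-reversed on little-endian) fetches the right offset word.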
1849 __ sub(Rindex, R17_tos, Rlow_byte); 1850 __ extsw(Rindex, Rindex); 1851 __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2); 1852 __ sldi(Rindex, Rindex, LogBytesPerInt); 1853 __ addi(Rindex, Rindex, 3 * BytesPerInt); 1854 #if defined(VM_LITTLE_ENDIAN) 1855 __ lwbrx(Roffset, Rdef_offset_addr, Rindex); 1856 __ extsw(Roffset, Roffset); 1857 #else 1858 __ lwax(Roffset, Rdef_offset_addr, Rindex); 1859 #endif 1860 __ b(Ldispatch); 1861 1862 __ bind(Ldefault_case); 1863 __ profile_switch_default(Rhigh_byte, Rscratch1); 1864 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1865 1866 __ bind(Ldispatch); 1867 1868 __ add(R14_bcp, Roffset, R14_bcp); 1869 __ dispatch_next(vtos); 1870 } 1871 1872 void TemplateTable::lookupswitch() { 1873 transition(itos, itos); 1874 __ stop("lookupswitch bytecode should have been rewritten"); 1875 } 1876 1877 // Table switch using linear search through cases. 1878 // Bytecode stream format: 1879 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1880 // Note: Everything is big-endian format here. 1881 void TemplateTable::fast_linearswitch() { 1882 transition(itos, vtos); 1883 1884 Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case; 1885 Register Rcount = R3_ARG1, 1886 Rcurrent_pair = R4_ARG2, 1887 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset. 1888 Roffset = R31, // Might need to survive C call. 1889 Rvalue = R12_scratch2, 1890 Rscratch = R11_scratch1, 1891 Rcmp_value = R17_tos; 1892 1893 // Align bcp. 1894 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1895 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1896 1897 // Setup loop counter and limit. 1898 __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1899 __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair. 1900 1901 __ mtctr(Rcount); 1902 __ cmpwi(CCR0, Rcount, 0); 1903 __ bne(CCR0, Lloop_entry); 1904 1905 // Default case 1906 __ bind(Ldefault_case); 1907 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1908 if (ProfileInterpreter) { 1909 __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */); 1910 } 1911 __ b(Lcontinue_execution); 1912 1913 // Next iteration 1914 __ bind(Lsearch_loop); 1915 __ bdz(Ldefault_case); 1916 __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt); 1917 __ bind(Lloop_entry); 1918 __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned); 1919 __ cmpw(CCR0, Rvalue, Rcmp_value); 1920 __ bne(CCR0, Lsearch_loop); 1921 1922 // Found, load offset. 1923 __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed); 1924 // Calculate case index and profile 1925 __ mfctr(Rcurrent_pair); 1926 if (ProfileInterpreter) { 1927 __ sub(Rcurrent_pair, Rcount, Rcurrent_pair); 1928 __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch); 1929 } 1930 1931 __ bind(Lcontinue_execution); 1932 __ add(R14_bcp, Roffset, R14_bcp); 1933 __ dispatch_next(vtos); 1934 } 1935 1936 // Table switch using binary search (value/offset pairs are ordered). 1937 // Bytecode stream format: 1938 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1939 // Note: Everything is big-endian format here. 
So on little-endian machines, we have to byte-reverse the offset, the count, and the compare value.
1940 void TemplateTable::fast_binaryswitch() {
1941
1942   transition(itos, vtos);
1943   // Implementation using the following core algorithm: (copied from Intel)
1944   //
1945   // int binary_search(int key, LookupswitchPair* array, int n) {
1946   //   // Binary search according to "Methodik des Programmierens" by
1947   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1948   //   int i = 0;
1949   //   int j = n;
1950   //   while (i+1 < j) {
1951   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1952   //     // with      Q: for all i: 0 <= i < n: key < a[i]
1953   //     // where a stands for the array and assuming that the (nonexistent)
1954   //     // element a[n] is infinitely big.
1955   //     int h = (i + j) >> 1;
1956   //     // i < h < j
1957   //     if (key < array[h].fast_match()) {
1958   //       j = h;
1959   //     } else {
1960   //       i = h;
1961   //     }
1962   //   }
1963   //   // R: a[i] <= key < a[i+1] or Q
1964   //   // (i.e., if key is within array, i is the correct index)
1965   //   return i;
1966   // }
1967
1968   // Register allocation.
1969   const Register Rkey = R17_tos; // Already set (tosca).
1970   const Register Rarray = R3_ARG1;
1971   const Register Ri = R4_ARG2;
1972   const Register Rj = R5_ARG3;
1973   const Register Rh = R6_ARG4;
1974   const Register Rscratch = R11_scratch1;
1975
1976   const int log_entry_size = 3;
1977   const int entry_size = 1 << log_entry_size;
1978
1979   Label found;
1980
1981   // Find array start.
1982   __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
1983   __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
1984
1985   // Initialize i & j.
1986   __ li(Ri, 0);
1987   __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
1988
1989   // And start.
1990   Label entry;
1991   __ b(entry);
1992
1993   // Binary search loop.
1994   { Label loop;
1995     __ bind(loop);
1996     // int h = (i + j) >> 1;
1997     __ srdi(Rh, Rh, 1);
1998     // if (key < array[h].fast_match()) {
1999     //   j = h;
2000     // } else {
2001     //   i = h;
2002     // }
2003     __ sldi(Rscratch, Rh, log_entry_size);
2004 #if defined(VM_LITTLE_ENDIAN)
2005     __ lwbrx(Rscratch, Rscratch, Rarray);
2006 #else
2007     __ lwzx(Rscratch, Rscratch, Rarray);
2008 #endif
2009
2010     // if (key < current value)
2011     //   Rj = Rh
2012     // else
2013     //   Ri = Rh
2014     Label Lgreater;
2015     __ cmpw(CCR0, Rkey, Rscratch);
2016     __ bge(CCR0, Lgreater);
2017     __ mr(Rj, Rh);
2018     __ b(entry);
2019     __ bind(Lgreater);
2020     __ mr(Ri, Rh);
2021
2022     // while (i+1 < j)
2023     __ bind(entry);
2024     __ addi(Rscratch, Ri, 1);
2025     __ cmpw(CCR0, Rscratch, Rj);
2026     __ add(Rh, Ri, Rj); // Start h = i + j; the shift to (i + j) >> 1 happens at the loop head.
2027
2028     __ blt(CCR0, loop);
2029   }
2030
2031   // End of binary search, result index is i (must check again!).
2032   Label default_case;
2033   Label continue_execution;
2034   if (ProfileInterpreter) {
2035     __ mr(Rh, Ri); // Save index in Rh for profiling.
2036   }
2037   // Convert index i into the address of pair i (match value at offset 0, branch offset at BytesPerInt).
2038   __ sldi(Ri, Ri, log_entry_size);
2039   __ add(Ri, Ri, Rarray);
2040   __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
2041
2042
2043   // Does the pair's match value equal the key?
2044   __ cmpw(CCR0, Rkey, Rscratch);
2045   __ beq(CCR0, found);
2046   // Entry not found -> j = default offset.
2047   __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
2048   __ b(default_case);
2049
2050   __ bind(found);
2051   // Entry found -> j = offset.
2052   __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
2053   __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
2054
2055   if (ProfileInterpreter) {
2056     __ b(continue_execution);
2057   }
2058
2059   __ bind(default_case); // fall through (if not profiling)
2060   __ profile_switch_default(Ri, Rscratch);
2061
2062   __ bind(continue_execution);
2063
2064   __ extsw(Rj, Rj);
2065   __ add(R14_bcp, Rj, R14_bcp);
2066   __ dispatch_next(vtos);
2067 }
2068
2069 void TemplateTable::_return(TosState state) {
2070   transition(state, state);
2071   assert(_desc->calls_vm(),
2072          "inconsistent calls_vm information"); // call in remove_activation
2073
2074   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2075
2076     Register Rscratch     = R11_scratch1,
2077              Rklass       = R12_scratch2,
2078              Rklass_flags = Rklass;
2079     Label Lskip_register_finalizer;
2080
2081     // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
2082     assert(state == vtos, "only valid state");
2083     __ ld(R17_tos, 0, R18_locals);
2084
2085     // Load klass of this obj.
2086     __ load_klass(Rklass, R17_tos);
2087     __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
2088     __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
2089     __ bfalse(CCR0, Lskip_register_finalizer);
2090
2091     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
2092
2093     __ align(32, 12);
2094     __ bind(Lskip_register_finalizer);
2095   }
2096
2097   // Move the result value into the correct register and remove the memory stack frame.
2098   __ remove_activation(state, /* throw_monitor_exception */ true);
2099   // Restoration of lr done by remove_activation.
2100   switch (state) {
2101     case ltos:
2102     case btos:
2103     case ctos:
2104     case stos:
2105     case atos:
2106     case itos: __ mr(R3_RET, R17_tos); break;
2107     case ftos:
2108     case dtos: __ fmr(F1_RET, F15_ftos); break;
2109     case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
2110                // to get visible before the reference to the object gets stored anywhere.
2111                __ membar(Assembler::StoreStore); break;
2112     default  : ShouldNotReachHere();
2113   }
2114   __ blr();
2115 }
2116
2117 // ============================================================================
2118 // Constant pool cache access
2119 //
2120 // Memory ordering:
2121 //
2122 // As in the C++ interpreter, we load the fields
2123 //   - _indices
2124 //   - _f12_oop
2125 // with acquire semantics, because they are checked to see whether the cache is already resolved. We don't
2126 // want to float loads above this check.
2127 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2128 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1().
2129
2130 // Call into the VM if the call site is not yet resolved.
2131 //
2132 // Input regs:
2133 // - None; all passed regs are outputs.
2134 //
2135 // Returns:
2136 // - Rcache: The const pool cache entry that contains the resolved result.
2137 //
2138 //
2139 // Kills:
2140 // - Rscratch
2141 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2142
2143   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2144   Label Lresolved, Ldone;
2145
2146   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2147   // We are resolved if the indices field contains the current bytecode.
2148 #if defined(VM_LITTLE_ENDIAN)
2149   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2150 #else
2151   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2152 #endif
2153   // Acquire by cmp-br-isync (see below).
2154   __ cmpdi(CCR0, Rscratch, (int)bytecode());
2155   __ beq(CCR0, Lresolved);
2156
2157   address entry = NULL;
2158   switch (bytecode()) {
2159     case Bytecodes::_getstatic      : // fall through
2160     case Bytecodes::_putstatic      : // fall through
2161     case Bytecodes::_getfield       : // fall through
2162     case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2163     case Bytecodes::_invokevirtual  : // fall through
2164     case Bytecodes::_invokespecial  : // fall through
2165     case Bytecodes::_invokestatic   : // fall through
2166     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2167     case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2168     case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2169     default                         : ShouldNotReachHere(); break;
2170   }
2171   __ li(R4_ARG2, (int)bytecode());
2172   __ call_VM(noreg, entry, R4_ARG2, true);
2173
2174   // Update registers with resolved info.
2175   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2176   __ b(Ldone);
2177
2178   __ bind(Lresolved);
2179   __ isync(); // Order load wrt. succeeding loads.
2180   __ bind(Ldone);
2181 }
2182
2183 // Load the constant pool cache entry for field accesses into registers.
2184 // The Rcache and Rindex registers must be set before the call.
2185 // Input:
2186 // - Rcache, Rindex
2187 // Output:
2188 // - Robj, Roffset, Rflags
2189 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2190                                               Register Rcache,
2191                                               Register Rindex /* unused on PPC64 */,
2192                                               Register Roffset,
2193                                               Register Rflags,
2194                                               bool is_static = false) {
2195   assert_different_registers(Rcache, Rflags, Roffset);
2196   // assert(Rindex == noreg, "parameter not used on PPC64");
2197
2198   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2199   __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
2200   __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
2201   if (is_static) {
2202     __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
2203     __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
2204     // Acquire not needed here. Following access has an address dependency on this value.
2205   }
2206 }
2207
2208 // Load the constant pool cache entry for invokes into registers.
2209 // Resolve if necessary.
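// For orientation, the resolution check performed above boils down to
// (pseudocode; 'indices_byte' is an illustrative accessor, not a real one):
//
//   if (indices_byte(cache_entry, byte_no) != current_bytecode) {
//     call the matching InterpreterRuntime::resolve_* entry;  // fills in the entry
//   }
//   // the entry is now resolved; later loads must not float above this check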
2210
2211 // Input Registers:
2212 // - None; bcp is used, though.
2213 //
2214 // Return registers:
2215 // - Rmethod       (f1 field or f2 if invokevirtual)
2216 // - Ritable_index (f2 field)
2217 // - Rflags        (flags field)
2218 //
2219 // Kills:
2220 // - R21
2221 //
2222 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2223                                                Register Rmethod,
2224                                                Register Ritable_index,
2225                                                Register Rflags,
2226                                                bool is_invokevirtual,
2227                                                bool is_invokevfinal,
2228                                                bool is_invokedynamic) {
2229
2230   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2231   // Determine constant pool cache field offsets.
2232   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2233   const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
2234   const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
2235   // Access constant pool cache fields.
2236   const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());
2237
2238   Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.
2239
2240   if (is_invokevfinal) {
2241     assert(Ritable_index == noreg, "register not used");
2242     // Already resolved.
2243     __ get_cache_and_index_at_bcp(Rcache, 1);
2244   } else {
2245     resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
2246   }
2247
2248   __ ld(Rmethod, method_offset, Rcache);
2249   __ ld(Rflags, flags_offset, Rcache);
2250
2251   if (Ritable_index != noreg) {
2252     __ ld(Ritable_index, index_offset, Rcache);
2253   }
2254 }
2255
2256 // ============================================================================
2257 // Field access
2258
2259 // Volatile variables demand their effects be made known to all CPUs
2260 // in order. Store buffers on most chips allow reads & writes to
2261 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2262 // without some kind of memory barrier (i.e., it's not sufficient that
2263 // the interpreter does not reorder volatile references, the hardware
2264 // also must not reorder them).
2265 //
2266 // According to the new Java Memory Model (JMM):
2267 // (1) All volatiles are serialized wrt each other. ALSO reads &
2268 //     writes act as acquire & release, so:
2269 // (2) A read cannot let unrelated NON-volatile memory refs that
2270 //     happen after the read float up to before the read. It's OK for
2271 //     non-volatile memory refs that happen before the volatile read to
2272 //     float down below it.
2273 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2274 //     memory refs that happen BEFORE the write float down to after the
2275 //     write. It's OK for non-volatile memory refs that happen after the
2276 //     volatile write to float up before it.
2277 //
2278 // We only put in barriers around volatile refs (they are expensive),
2279 // not _between_ memory refs (that would require us to track the
2280 // flavor of the previous memory refs). Requirements (2) and (3)
2281 // require some barriers before volatile stores and after volatile
2282 // loads. These nearly cover requirement (1) but miss the
2283 // volatile-store-volatile-load case. This final case is placed after
2284 // volatile-stores although it could just as well go before
2285 // volatile-loads.
2286
2287 // The cache and index registers are expected to be set before the call.
2288 // Correct values of the cache and index registers are preserved.
2289 // Kills:
2290 //   Rcache (if has_tos)
2291 //   Rscratch
2292 void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
2293
2294   assert_different_registers(Rcache, Rscratch);
2295
2296   if (JvmtiExport::can_post_field_access()) {
2297     ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2298     Label Lno_field_access_post;
2299
2300     // Check if post field access is enabled.
2301     int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
2302     __ lwz(Rscratch, offs, Rscratch);
2303
2304     __ cmpwi(CCR0, Rscratch, 0);
2305     __ beq(CCR0, Lno_field_access_post);
2306
2307     // Post access enabled - do it!
2308     __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2309     if (is_static) {
2310       __ li(R17_tos, 0);
2311     } else {
2312       if (has_tos) {
2313         // The fast bytecode versions have obj ptr in register.
2314         // Thus, save the object pointer before call_VM() clobbers it:
2315         // put the object on tos where the GC wants it.
2316         __ push_ptr(R17_tos);
2317       } else {
2318         // Load top of stack (do not pop the value off the stack).
2319         __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
2320       }
2321       __ verify_oop(R17_tos);
2322     }
2323     // tos:   object pointer or NULL if static
2324     // cache: cache entry pointer
2325     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
2326     if (!is_static && has_tos) {
2327       // Restore object pointer.
2328       __ pop_ptr(R17_tos);
2329       __ verify_oop(R17_tos);
2330     } else {
2331       // Cache is still needed to get class or obj.
2332       __ get_cache_and_index_at_bcp(Rcache, 1);
2333     }
2334
2335     __ align(32, 12);
2336     __ bind(Lno_field_access_post);
2337   }
2338 }
2339
2340 // Kills R11_scratch1.
2341 void TemplateTable::pop_and_check_object(Register Roop) {
2342   Register Rtmp = R11_scratch1;
2343
2344   assert_different_registers(Rtmp, Roop);
2345   __ pop_ptr(Roop);
2346   // For field access we must check the obj.
2347   __ null_check_throw(Roop, -1, Rtmp);
2348   __ verify_oop(Roop);
2349 }
2350
2351 // PPC64: implement volatile loads as fence-load-acquire.
2352 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2353   transition(vtos, vtos);
2354
2355   Label Lacquire, Lisync;
2356
2357   const Register Rcache        = R3_ARG1,
2358                  Rclass_or_obj = R22_tmp2,
2359                  Roffset       = R23_tmp3,
2360                  Rflags        = R31,
2361                  Rbtable       = R5_ARG3,
2362                  Rbc           = R6_ARG4,
2363                  Rscratch      = R12_scratch2;
2364
2365   static address field_branch_table[number_of_states],
2366                  static_branch_table[number_of_states];
2367
2368   address* branch_table = is_static ? static_branch_table : field_branch_table;
2369
2370   // Get field offset.
2371   resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2372
2373   // JVMTI support
2374   jvmti_post_field_access(Rcache, Rscratch, is_static, false);
2375
2376   // Load after possible GC.
2377   load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2378
2379   // Load pointer to branch table.
2380   __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2381
2382   // Get volatile flag.
2383   __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2384   // Note: sync is needed before volatile load on PPC64.
2385
2386   // Check field type.
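  // Conceptually, the extraction and dispatch that follow implement (a rough
  // sketch only; 'state' and 'entry' are illustrative locals):
  //
  //   int     state = (flags >> tos_state_shift) & ((1 << tos_state_bits) - 1);
  //   address entry = branch_table[state];
  //   if (is_volatile) entry -= BytesPerInstWord;  // enter at the leading fence
  //   goto *entry;                                 // (IRIW-sensitive CPUs only)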
2387 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2388 2389 #ifdef ASSERT 2390 Label LFlagInvalid; 2391 __ cmpldi(CCR0, Rflags, number_of_states); 2392 __ bge(CCR0, LFlagInvalid); 2393 #endif 2394 2395 // Load from branch table and dispatch (volatile case: one instruction ahead). 2396 __ sldi(Rflags, Rflags, LogBytesPerWord); 2397 __ cmpwi(CCR6, Rscratch, 1); // Volatile? 2398 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2399 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0. 2400 } 2401 __ ldx(Rbtable, Rbtable, Rflags); 2402 2403 // Get the obj from stack. 2404 if (!is_static) { 2405 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2406 } else { 2407 __ verify_oop(Rclass_or_obj); 2408 } 2409 2410 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2411 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2412 } 2413 __ mtctr(Rbtable); 2414 __ bctr(); 2415 2416 #ifdef ASSERT 2417 __ bind(LFlagInvalid); 2418 __ stop("got invalid flag", 0x654); 2419 2420 // __ bind(Lvtos); 2421 address pc_before_fence = __ pc(); 2422 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2423 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2424 assert(branch_table[vtos] == 0, "can't compute twice"); 2425 branch_table[vtos] = __ pc(); // non-volatile_entry point 2426 __ stop("vtos unexpected", 0x655); 2427 #endif 2428 2429 __ align(32, 28, 28); // Align load. 2430 // __ bind(Ldtos); 2431 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2432 assert(branch_table[dtos] == 0, "can't compute twice"); 2433 branch_table[dtos] = __ pc(); // non-volatile_entry point 2434 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 2435 __ push(dtos); 2436 if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch); 2437 { 2438 Label acquire_double; 2439 __ beq(CCR6, acquire_double); // Volatile? 2440 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2441 2442 __ bind(acquire_double); 2443 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2444 __ beq_predict_taken(CCR0, Lisync); 2445 __ b(Lisync); // In case of NAN. 2446 } 2447 2448 __ align(32, 28, 28); // Align load. 2449 // __ bind(Lftos); 2450 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2451 assert(branch_table[ftos] == 0, "can't compute twice"); 2452 branch_table[ftos] = __ pc(); // non-volatile_entry point 2453 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 2454 __ push(ftos); 2455 if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); } 2456 { 2457 Label acquire_float; 2458 __ beq(CCR6, acquire_float); // Volatile? 2459 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2460 2461 __ bind(acquire_float); 2462 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2463 __ beq_predict_taken(CCR0, Lisync); 2464 __ b(Lisync); // In case of NAN. 2465 } 2466 2467 __ align(32, 28, 28); // Align load. 2468 // __ bind(Litos); 2469 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 
2470 assert(branch_table[itos] == 0, "can't compute twice"); 2471 branch_table[itos] = __ pc(); // non-volatile_entry point 2472 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2473 __ push(itos); 2474 if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch); 2475 __ beq(CCR6, Lacquire); // Volatile? 2476 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2477 2478 __ align(32, 28, 28); // Align load. 2479 // __ bind(Lltos); 2480 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2481 assert(branch_table[ltos] == 0, "can't compute twice"); 2482 branch_table[ltos] = __ pc(); // non-volatile_entry point 2483 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2484 __ push(ltos); 2485 if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch); 2486 __ beq(CCR6, Lacquire); // Volatile? 2487 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2488 2489 __ align(32, 28, 28); // Align load. 2490 // __ bind(Lbtos); 2491 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2492 assert(branch_table[btos] == 0, "can't compute twice"); 2493 branch_table[btos] = __ pc(); // non-volatile_entry point 2494 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2495 __ extsb(R17_tos, R17_tos); 2496 __ push(btos); 2497 if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2498 __ beq(CCR6, Lacquire); // Volatile? 2499 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2500 2501 __ align(32, 28, 28); // Align load. 2502 // __ bind(Lctos); 2503 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2504 assert(branch_table[ctos] == 0, "can't compute twice"); 2505 branch_table[ctos] = __ pc(); // non-volatile_entry point 2506 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 2507 __ push(ctos); 2508 if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch); 2509 __ beq(CCR6, Lacquire); // Volatile? 2510 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2511 2512 __ align(32, 28, 28); // Align load. 2513 // __ bind(Lstos); 2514 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2515 assert(branch_table[stos] == 0, "can't compute twice"); 2516 branch_table[stos] = __ pc(); // non-volatile_entry point 2517 __ lhax(R17_tos, Rclass_or_obj, Roffset); 2518 __ push(stos); 2519 if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch); 2520 __ beq(CCR6, Lacquire); // Volatile? 2521 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2522 2523 __ align(32, 28, 28); // Align load. 2524 // __ bind(Latos); 2525 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2526 assert(branch_table[atos] == 0, "can't compute twice"); 2527 branch_table[atos] = __ pc(); // non-volatile_entry point 2528 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 2529 __ verify_oop(R17_tos); 2530 __ push(atos); 2531 //__ dcbt(R17_tos); // prefetch 2532 if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch); 2533 __ beq(CCR6, Lacquire); // Volatile? 
2534   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2535
2536   __ align(32, 12);
2537   __ bind(Lacquire);
2538   __ twi_0(R17_tos);
2539   __ bind(Lisync);
2540   __ isync(); // acquire
2541
2542 #ifdef ASSERT
2543   for (int i = 0; i < number_of_states; ++i) {
2544     assert(branch_table[i], "get initialization");
2545     //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2546     //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2547   }
2548 #endif
2549 }
2550
2551 void TemplateTable::getfield(int byte_no) {
2552   getfield_or_static(byte_no, false);
2553 }
2554
2555 void TemplateTable::getstatic(int byte_no) {
2556   getfield_or_static(byte_no, true);
2557 }
2558
2559 // The cache and index registers are expected to be set before the call.
2560 // The function may destroy various registers, just not the cache and index registers.
2561 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2562
2563   assert_different_registers(Rcache, Rscratch, R6_ARG4);
2564
2565   if (JvmtiExport::can_post_field_modification()) {
2566     Label Lno_field_mod_post;
2567
2568     // Check if post field modification is enabled.
2569     int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2570     __ lwz(Rscratch, offs, Rscratch);
2571
2572     __ cmpwi(CCR0, Rscratch, 0);
2573     __ beq(CCR0, Lno_field_mod_post);
2574
2575     // Do the post.
2576     ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2577     const Register Robj = Rscratch;
2578
2579     __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2580     if (is_static) {
2581       // Life is simple. Null out the object pointer.
2582       __ li(Robj, 0);
2583     } else {
2584       // In case of the fast versions, the value lives in registers => put it back on tos.
2585       int offs = Interpreter::expr_offset_in_bytes(0);
2586       Register base = R15_esp;
2587       switch(bytecode()) {
2588         case Bytecodes::_fast_aputfield: __ push_ptr(); offs += Interpreter::stackElementSize; break;
2589         case Bytecodes::_fast_iputfield: // Fall through
2590         case Bytecodes::_fast_bputfield: // Fall through
2591         case Bytecodes::_fast_cputfield: // Fall through
2592         case Bytecodes::_fast_sputfield: __ push_i(); offs += Interpreter::stackElementSize; break;
2593         case Bytecodes::_fast_lputfield: __ push_l(); offs += 2*Interpreter::stackElementSize; break;
2594         case Bytecodes::_fast_fputfield: __ push_f(); offs += Interpreter::stackElementSize; break;
2595         case Bytecodes::_fast_dputfield: __ push_d(); offs += 2*Interpreter::stackElementSize; break;
2596         default: {
2597           offs = 0;
2598           base = Robj;
2599           const Register Rflags = Robj;
2600           Label is_one_slot;
2601           // Life is harder. The stack holds the value on top, followed by the
2602           // object. We don't know the size of the value, though; it could be
2603           // one or two words depending on its type. As a result, we must find
2604           // the type to determine where the object is.
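          // Illustration of the two possible expression stack shapes (top of
          // stack on the left):
          //
          //   one-slot value:  [value]         [obj] ...  obj at expr_offset_in_bytes(1)
          //   two-slot value:  [value] [value] [obj] ...  obj at expr_offset_in_bytes(2)
          //
          // The ltos/dtos comparison below selects between exactly these cases.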
2605 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian 2606 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2607 2608 __ cmpwi(CCR0, Rflags, ltos); 2609 __ cmpwi(CCR1, Rflags, dtos); 2610 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1)); 2611 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); 2612 __ beq(CCR0, is_one_slot); 2613 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2)); 2614 __ bind(is_one_slot); 2615 break; 2616 } 2617 } 2618 __ ld(Robj, offs, base); 2619 __ verify_oop(Robj); 2620 } 2621 2622 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0)); 2623 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4); 2624 __ get_cache_and_index_at_bcp(Rcache, 1); 2625 2626 // In case of the fast versions, value lives in registers => put it back on tos. 2627 switch(bytecode()) { 2628 case Bytecodes::_fast_aputfield: __ pop_ptr(); break; 2629 case Bytecodes::_fast_iputfield: // Fall through 2630 case Bytecodes::_fast_bputfield: // Fall through 2631 case Bytecodes::_fast_cputfield: // Fall through 2632 case Bytecodes::_fast_sputfield: __ pop_i(); break; 2633 case Bytecodes::_fast_lputfield: __ pop_l(); break; 2634 case Bytecodes::_fast_fputfield: __ pop_f(); break; 2635 case Bytecodes::_fast_dputfield: __ pop_d(); break; 2636 default: break; // Nothin' to do. 2637 } 2638 2639 __ align(32, 12); 2640 __ bind(Lno_field_mod_post); 2641 } 2642 } 2643 2644 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release). 2645 void TemplateTable::putfield_or_static(int byte_no, bool is_static) { 2646 Label Lvolatile; 2647 2648 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2649 Rclass_or_obj = R31, // Needs to survive C call. 2650 Roffset = R22_tmp2, // Needs to survive C call. 2651 Rflags = R3_ARG1, 2652 Rbtable = R4_ARG2, 2653 Rscratch = R11_scratch1, 2654 Rscratch2 = R12_scratch2, 2655 Rscratch3 = R6_ARG4, 2656 Rbc = Rscratch3; 2657 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2658 2659 static address field_branch_table[number_of_states], 2660 static_branch_table[number_of_states]; 2661 2662 address* branch_table = is_static ? static_branch_table : field_branch_table; 2663 2664 // Stack (grows up): 2665 // value 2666 // obj 2667 2668 // Load the field offset. 2669 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2670 jvmti_post_field_mod(Rcache, Rscratch, is_static); 2671 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2672 2673 // Load pointer to branch table. 2674 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2675 2676 // Get volatile flag. 2677 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2678 2679 // Check the field type. 2680 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2681 2682 #ifdef ASSERT 2683 Label LFlagInvalid; 2684 __ cmpldi(CCR0, Rflags, number_of_states); 2685 __ bge(CCR0, LFlagInvalid); 2686 #endif 2687 2688 // Load from branch table and dispatch (volatile case: one instruction ahead). 
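  // Same trick as in getfield_or_static: each branch-table entry points one
  // instruction past a release(), so a volatile store can enter at the release
  // by subtracting the size of a single instruction. Rough sketch:
  //
  //   address entry = branch_table[state] - (is_volatile ? BytesPerInstWord : 0);
  //   goto *entry;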
2689 __ sldi(Rflags, Rflags, LogBytesPerWord); 2690 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile? 2691 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0. 2692 __ ldx(Rbtable, Rbtable, Rflags); 2693 2694 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2695 __ mtctr(Rbtable); 2696 __ bctr(); 2697 2698 #ifdef ASSERT 2699 __ bind(LFlagInvalid); 2700 __ stop("got invalid flag", 0x656); 2701 2702 // __ bind(Lvtos); 2703 address pc_before_release = __ pc(); 2704 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2705 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2706 assert(branch_table[vtos] == 0, "can't compute twice"); 2707 branch_table[vtos] = __ pc(); // non-volatile_entry point 2708 __ stop("vtos unexpected", 0x657); 2709 #endif 2710 2711 __ align(32, 28, 28); // Align pop. 2712 // __ bind(Ldtos); 2713 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2714 assert(branch_table[dtos] == 0, "can't compute twice"); 2715 branch_table[dtos] = __ pc(); // non-volatile_entry point 2716 __ pop(dtos); 2717 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2718 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2719 if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); } 2720 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2721 __ beq(CR_is_vol, Lvolatile); // Volatile? 2722 } 2723 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2724 2725 __ align(32, 28, 28); // Align pop. 2726 // __ bind(Lftos); 2727 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2728 assert(branch_table[ftos] == 0, "can't compute twice"); 2729 branch_table[ftos] = __ pc(); // non-volatile_entry point 2730 __ pop(ftos); 2731 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2732 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2733 if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); } 2734 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2735 __ beq(CR_is_vol, Lvolatile); // Volatile? 2736 } 2737 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2738 2739 __ align(32, 28, 28); // Align pop. 2740 // __ bind(Litos); 2741 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2742 assert(branch_table[itos] == 0, "can't compute twice"); 2743 branch_table[itos] = __ pc(); // non-volatile_entry point 2744 __ pop(itos); 2745 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2746 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2747 if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); } 2748 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2749 __ beq(CR_is_vol, Lvolatile); // Volatile? 2750 } 2751 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2752 2753 __ align(32, 28, 28); // Align pop. 2754 // __ bind(Lltos); 2755 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2756 assert(branch_table[ltos] == 0, "can't compute twice"); 2757 branch_table[ltos] = __ pc(); // non-volatile_entry point 2758 __ pop(ltos); 2759 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 
2760 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2761 if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); } 2762 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2763 __ beq(CR_is_vol, Lvolatile); // Volatile? 2764 } 2765 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2766 2767 __ align(32, 28, 28); // Align pop. 2768 // __ bind(Lbtos); 2769 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2770 assert(branch_table[btos] == 0, "can't compute twice"); 2771 branch_table[btos] = __ pc(); // non-volatile_entry point 2772 __ pop(btos); 2773 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2774 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2775 if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); } 2776 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2777 __ beq(CR_is_vol, Lvolatile); // Volatile? 2778 } 2779 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2780 2781 __ align(32, 28, 28); // Align pop. 2782 // __ bind(Lctos); 2783 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2784 assert(branch_table[ctos] == 0, "can't compute twice"); 2785 branch_table[ctos] = __ pc(); // non-volatile_entry point 2786 __ pop(ctos); 2787 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.. 2788 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2789 if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); } 2790 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2791 __ beq(CR_is_vol, Lvolatile); // Volatile? 2792 } 2793 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2794 2795 __ align(32, 28, 28); // Align pop. 2796 // __ bind(Lstos); 2797 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2798 assert(branch_table[stos] == 0, "can't compute twice"); 2799 branch_table[stos] = __ pc(); // non-volatile_entry point 2800 __ pop(stos); 2801 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2802 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2803 if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); } 2804 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2805 __ beq(CR_is_vol, Lvolatile); // Volatile? 2806 } 2807 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2808 2809 __ align(32, 28, 28); // Align pop. 2810 // __ bind(Latos); 2811 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2812 assert(branch_table[atos] == 0, "can't compute twice"); 2813 branch_table[atos] = __ pc(); // non-volatile_entry point 2814 __ pop(atos); 2815 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1 2816 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2817 if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); } 2818 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2819 __ beq(CR_is_vol, Lvolatile); // Volatile? 
2820 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2821 2822 __ align(32, 12); 2823 __ bind(Lvolatile); 2824 __ fence(); 2825 } 2826 // fallthru: __ b(Lexit); 2827 2828 #ifdef ASSERT 2829 for (int i = 0; i<number_of_states; ++i) { 2830 assert(branch_table[i], "put initialization"); 2831 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 2832 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 2833 } 2834 #endif 2835 } 2836 2837 void TemplateTable::putfield(int byte_no) { 2838 putfield_or_static(byte_no, false); 2839 } 2840 2841 void TemplateTable::putstatic(int byte_no) { 2842 putfield_or_static(byte_no, true); 2843 } 2844 2845 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job. 2846 void TemplateTable::jvmti_post_fast_field_mod() { 2847 __ should_not_reach_here(); 2848 } 2849 2850 void TemplateTable::fast_storefield(TosState state) { 2851 transition(state, vtos); 2852 2853 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2854 Rclass_or_obj = R31, // Needs to survive C call. 2855 Roffset = R22_tmp2, // Needs to survive C call. 2856 Rflags = R3_ARG1, 2857 Rscratch = R11_scratch1, 2858 Rscratch2 = R12_scratch2, 2859 Rscratch3 = R4_ARG2; 2860 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2861 2862 // Constant pool already resolved => Load flags and offset of field. 2863 __ get_cache_and_index_at_bcp(Rcache, 1); 2864 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */); 2865 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2866 2867 // Get the obj and the final store addr. 2868 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2869 2870 // Get volatile flag. 2871 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2872 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); } 2873 { 2874 Label LnotVolatile; 2875 __ beq(CCR0, LnotVolatile); 2876 __ release(); 2877 __ align(32, 12); 2878 __ bind(LnotVolatile); 2879 } 2880 2881 // Do the store and fencing. 2882 switch(bytecode()) { 2883 case Bytecodes::_fast_aputfield: 2884 // Store into the field. 
2885 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2886 break; 2887 2888 case Bytecodes::_fast_iputfield: 2889 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2890 break; 2891 2892 case Bytecodes::_fast_lputfield: 2893 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2894 break; 2895 2896 case Bytecodes::_fast_bputfield: 2897 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2898 break; 2899 2900 case Bytecodes::_fast_cputfield: 2901 case Bytecodes::_fast_sputfield: 2902 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2903 break; 2904 2905 case Bytecodes::_fast_fputfield: 2906 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2907 break; 2908 2909 case Bytecodes::_fast_dputfield: 2910 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2911 break; 2912 2913 default: ShouldNotReachHere(); 2914 } 2915 2916 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2917 Label LVolatile; 2918 __ beq(CR_is_vol, LVolatile); 2919 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2920 2921 __ align(32, 12); 2922 __ bind(LVolatile); 2923 __ fence(); 2924 } 2925 } 2926 2927 void TemplateTable::fast_accessfield(TosState state) { 2928 transition(atos, state); 2929 2930 Label LisVolatile; 2931 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2932 2933 const Register Rcache = R3_ARG1, 2934 Rclass_or_obj = R17_tos, 2935 Roffset = R22_tmp2, 2936 Rflags = R23_tmp3, 2937 Rscratch = R12_scratch2; 2938 2939 // Constant pool already resolved. Get the field offset. 2940 __ get_cache_and_index_at_bcp(Rcache, 1); 2941 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2942 2943 // JVMTI support 2944 jvmti_post_field_access(Rcache, Rscratch, false, true); 2945 2946 // Get the load address. 2947 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 2948 2949 // Get volatile flag. 2950 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 
2951 __ bne(CCR0, LisVolatile); 2952 2953 switch(bytecode()) { 2954 case Bytecodes::_fast_agetfield: 2955 { 2956 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 2957 __ verify_oop(R17_tos); 2958 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2959 2960 __ bind(LisVolatile); 2961 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 2962 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 2963 __ verify_oop(R17_tos); 2964 __ twi_0(R17_tos); 2965 __ isync(); 2966 break; 2967 } 2968 case Bytecodes::_fast_igetfield: 2969 { 2970 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2971 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2972 2973 __ bind(LisVolatile); 2974 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 2975 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2976 __ twi_0(R17_tos); 2977 __ isync(); 2978 break; 2979 } 2980 case Bytecodes::_fast_lgetfield: 2981 { 2982 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2983 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2984 2985 __ bind(LisVolatile); 2986 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 2987 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2988 __ twi_0(R17_tos); 2989 __ isync(); 2990 break; 2991 } 2992 case Bytecodes::_fast_bgetfield: 2993 { 2994 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2995 __ extsb(R17_tos, R17_tos); 2996 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2997 2998 __ bind(LisVolatile); 2999 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3000 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3001 __ twi_0(R17_tos); 3002 __ extsb(R17_tos, R17_tos); 3003 __ isync(); 3004 break; 3005 } 3006 case Bytecodes::_fast_cgetfield: 3007 { 3008 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3009 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3010 3011 __ bind(LisVolatile); 3012 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3013 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3014 __ twi_0(R17_tos); 3015 __ isync(); 3016 break; 3017 } 3018 case Bytecodes::_fast_sgetfield: 3019 { 3020 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3021 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3022 3023 __ bind(LisVolatile); 3024 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3025 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3026 __ twi_0(R17_tos); 3027 __ isync(); 3028 break; 3029 } 3030 case Bytecodes::_fast_fgetfield: 3031 { 3032 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3033 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3034 3035 __ bind(LisVolatile); 3036 Label Ldummy; 3037 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3038 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3039 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3040 __ bne_predict_not_taken(CCR0, Ldummy); 3041 __ bind(Ldummy); 3042 __ isync(); 3043 break; 3044 } 3045 case Bytecodes::_fast_dgetfield: 3046 { 3047 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3048 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3049 3050 __ bind(LisVolatile); 3051 Label Ldummy; 3052 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3053 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3054 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 
3055 __ bne_predict_not_taken(CCR0, Ldummy); 3056 __ bind(Ldummy); 3057 __ isync(); 3058 break; 3059 } 3060 default: ShouldNotReachHere(); 3061 } 3062 } 3063 3064 void TemplateTable::fast_xaccess(TosState state) { 3065 transition(vtos, state); 3066 3067 Label LisVolatile; 3068 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3069 const Register Rcache = R3_ARG1, 3070 Rclass_or_obj = R17_tos, 3071 Roffset = R22_tmp2, 3072 Rflags = R23_tmp3, 3073 Rscratch = R12_scratch2; 3074 3075 __ ld(Rclass_or_obj, 0, R18_locals); 3076 3077 // Constant pool already resolved. Get the field offset. 3078 __ get_cache_and_index_at_bcp(Rcache, 2); 3079 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3080 3081 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches. 3082 3083 // Needed to report exception at the correct bcp. 3084 __ addi(R14_bcp, R14_bcp, 1); 3085 3086 // Get the load address. 3087 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3088 3089 // Get volatile flag. 3090 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3091 __ bne(CCR0, LisVolatile); 3092 3093 switch(state) { 3094 case atos: 3095 { 3096 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3097 __ verify_oop(R17_tos); 3098 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3099 3100 __ bind(LisVolatile); 3101 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3102 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3103 __ verify_oop(R17_tos); 3104 __ twi_0(R17_tos); 3105 __ isync(); 3106 break; 3107 } 3108 case itos: 3109 { 3110 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3111 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3112 3113 __ bind(LisVolatile); 3114 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3115 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3116 __ twi_0(R17_tos); 3117 __ isync(); 3118 break; 3119 } 3120 case ftos: 3121 { 3122 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3123 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3124 3125 __ bind(LisVolatile); 3126 Label Ldummy; 3127 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3128 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3129 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3130 __ bne_predict_not_taken(CCR0, Ldummy); 3131 __ bind(Ldummy); 3132 __ isync(); 3133 break; 3134 } 3135 default: ShouldNotReachHere(); 3136 } 3137 __ addi(R14_bcp, R14_bcp, -1); 3138 } 3139 3140 // ============================================================================ 3141 // Calls 3142 3143 // Common code for invoke 3144 // 3145 // Input: 3146 // - byte_no 3147 // 3148 // Output: 3149 // - Rmethod: The method to invoke next. 3150 // - Rret_addr: The return address to return to. 3151 // - Rindex: MethodType (invokehandle) or CallSite obj (invokedynamic) 3152 // - Rrecv: Cache for "this" pointer, might be noreg if static call. 3153 // - Rflags: Method flags from const pool cache. 3154 // 3155 // Kills: 3156 // - Rscratch1 3157 // 3158 void TemplateTable::prepare_invoke(int byte_no, 3159 Register Rmethod, // linked method (or i-klass) 3160 Register Rret_addr,// return address 3161 Register Rindex, // itable index, MethodType, etc. 3162 Register Rrecv, // If caller wants to see it. 3163 Register Rflags, // If caller wants to test it. 
Register Rscratch
3165                                   ) {
3166   // Determine flags.
3167   const Bytecodes::Code code = bytecode();
3168   const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
3169   const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
3170   const bool is_invokehandle    = code == Bytecodes::_invokehandle;
3171   const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
3172   const bool is_invokespecial   = code == Bytecodes::_invokespecial;
3173   const bool load_receiver      = (Rrecv != noreg);
3174   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3175
3176   assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
3177   assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
3178   assert_different_registers(Rret_addr, Rscratch);
3179
3180   load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);
3181
3182   // Saving of SP done in call_from_interpreter.
3183
3184   // Maybe push "appendix" to arguments.
3185   if (is_invokedynamic || is_invokehandle) {
3186     Label Ldone;
3187     __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
3188     __ beq(CCR0, Ldone);
3189     // Push "appendix" (MethodType, CallSite, etc.).
3190     // This must be done before we get the receiver,
3191     // since the parameter_size includes it.
3192     __ load_resolved_reference_at_index(Rscratch, Rindex);
3193     __ verify_oop(Rscratch);
3194     __ push_ptr(Rscratch);
3195     __ bind(Ldone);
3196   }
3197
3198   // Load receiver if needed (after appendix is pushed so parameter size is correct).
3199   if (load_receiver) {
3200     const Register Rparam_count = Rscratch;
3201     __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
3202     __ load_receiver(Rparam_count, Rrecv);
3203     __ verify_oop(Rrecv);
3204   }
3205
3206   // Get return address.
3207   {
3208     Register Rtable_addr = Rscratch;
3209     Register Rret_type = Rret_addr;
3210     address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3211
3212     // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3213     __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3214     __ load_dispatch_table(Rtable_addr, (address*)table_addr);
3215     __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3216     // Get return address.
3217     __ ldx(Rret_addr, Rtable_addr, Rret_type);
3218   }
3219 }
3220
3221 // Helper for virtual calls. Load target out of vtable and jump off!
3222 // Kills all passed registers.
3223 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {
3224
3225   assert_different_registers(Rrecv_klass, Rtemp, Rret);
3226   const Register Rtarget_method = Rindex;
3227
3228   // Get target method & entry point.
3229   const int base = InstanceKlass::vtable_start_offset() * wordSize;
3230   // Calc vtable addr: scale the vtable index by the vtable entry size.
3231   __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
3232   // Load target.
3233   __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
3234   __ ldx(Rtarget_method, Rindex, Rrecv_klass);
3235   // Argument and return type profiling.
3236   __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
3237   __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
3238 }
3239
3240 // Virtual or final call.

// Virtual or final call. Final calls are rewritten on the fly to run through
// "fast_finalcall" next time.
void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);

  Register Rtable_addr = R11_scratch1,
           Rret_type   = R12_scratch2,
           Rret_addr   = R5_ARG3,
           Rflags      = R22_tmp2,  // Should survive C call.
           Rrecv       = R3_ARG1,
           Rrecv_klass = Rrecv,
           Rvtableindex_or_method = R31, // Should survive C call.
           Rnum_params = R4_ARG2,
           Rnew_bc     = R6_ARG4;

  Label LnotFinal;

  load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
  invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);

  __ align(32, 12);
  __ bind(LnotFinal);
  // Load "this" pointer (receiver).
  __ rldicl(Rnum_params, Rflags, 64, 48);
  __ load_receiver(Rnum_params, Rrecv);
  __ verify_oop(Rrecv);

  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);
  __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
  __ load_klass(Rrecv_klass, Rrecv);
  __ verify_klass_ptr(Rrecv_klass);
  __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);

  generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f2_byte, "use this argument");
  Register Rflags  = R22_tmp2,
           Rmethod = R31;
  load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
  invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {

  assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);

  // Load receiver from stack slot.
  Register Rrecv = Rscratch2;
  Register Rnum_params = Rrecv;

  __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
  __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);

  // Get return address.
  Register Rtable_addr = Rscratch1,
           Rret_addr   = Rflags,
           Rret_type   = Rret_addr;
  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);

  // Load receiver and receiver NULL check.
  __ load_receiver(Rnum_params, Rrecv);
  __ null_check_throw(Rrecv, -1, Rscratch1);

  __ profile_final_call(Rrecv, Rscratch1);
  // Argument and return type profiling.
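  // (profile_arguments_type records the observed argument and return types
  // in the method's MethodData for use by the JIT compilers; the trailing
  // 'true' selects the variant for virtual/final sends.)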
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);

  // Do the call.
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
}

void TemplateTable::invokespecial(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr   = R4_ARG2,
           Rflags      = R5_ARG3,
           Rreceiver   = R6_ARG4,
           Rmethod     = R31;

  prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);

  // Receiver NULL check.
  __ null_check_throw(Rreceiver, -1, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokestatic(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr   = R4_ARG2,
           Rflags      = R5_ARG3;

  prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
                                                  Register Rret,
                                                  Register Rflags,
                                                  Register Rindex,
                                                  Register Rtemp1,
                                                  Register Rtemp2) {

  assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
  Label LnotFinal;

  // Check for vfinal.
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  Register Rscratch = Rflags; // Rflags is dead now.

  // Final call case.
  __ profile_final_call(Rtemp1, Rscratch);
  // Argument and return type profiling.
  __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
  // Do the final call - the index (f2) contains the method.
  __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);

  // Non-final call case.
  __ bind(LnotFinal);
  __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
  generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
}

void TemplateTable::invokeinterface(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  const Register Rscratch1        = R11_scratch1,
                 Rscratch2        = R12_scratch2,
                 Rscratch3        = R9_ARG7,
                 Rscratch4        = R10_ARG8,
                 Rtable_addr      = Rscratch2,
                 Rinterface_klass = R5_ARG3,
                 Rret_type        = R8_ARG6,
                 Rret_addr        = Rret_type,
                 Rindex           = R6_ARG4,
                 Rreceiver        = R4_ARG2,
                 Rrecv_klass      = Rreceiver,
                 Rflags           = R7_ARG5;

  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);

  // Get receiver klass.
  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
  __ load_klass(Rrecv_klass, Rreceiver);
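
  // What follows: first filter out the corner case of an invokeinterface on
  // a java.lang.Object method (dispatched virtually), then search the
  // receiver klass' itable. The two failure modes map to the Java exceptions
  // IncompatibleClassChangeError (interface not implemented by the receiver)
  // and AbstractMethodError (no implementation of the selected method).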
  // Check corner case: Object method.
  Label LobjectMethod;

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  __ btrue(CCR0, LobjectMethod);

  // Fallthrough: The normal invokeinterface case.
  __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);

  // Find entry point to call.
  Label Lthrow_icc, Lthrow_ame;
  // Result will be returned in Rindex.
  __ mr(Rscratch4, Rrecv_klass);
  __ mr(Rscratch3, Rindex);
  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);

  __ cmpdi(CCR0, Rindex, 0);
  __ beq(CCR0, Lthrow_ame);
  // Found entry. Jump off!
  // Argument and return type profiling.
  __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
  __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);

  // Itable entry was NULL => Throw AbstractMethodError.
  __ bind(Lthrow_ame);
  __ mr(Rrecv_klass, Rscratch4);
  __ mr(Rindex, Rscratch3);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  // Interface was not found => Throw IncompatibleClassChangeError.
  __ bind(Lthrow_icc);
  __ mr(Rrecv_klass, Rscratch4);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));

  __ should_not_reach_here();

  // Special case of invokeinterface called for a virtual method of
  // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
  // The invokeinterface was rewritten to an invokevirtual, hence we have
  // to handle this corner case. This code isn't produced by javac, but could
  // be produced by another compliant Java compiler.
  __ bind(LobjectMethod);
  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags    = R4_ARG2,
                 Rmethod   = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);

  // Profile this call.
  __ profile_call(Rscratch1, Rscratch2);

  // Off we go. With the new method handles, we don't jump to a method handle
  // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
  // to be the call site object the bootstrap method returned. This is passed to a
  // "link" method which does the dispatch (most likely just grabs the MH stored
  // inside the call site and does an invokehandle).
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags    = R4_ARG2,
                 Rrecv     = R5_ARG3,
                 Rmethod   = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
  __ verify_method_ptr(Rmethod);
  __ null_check_throw(Rrecv, -1, Rscratch2);

  __ profile_final_call(Rrecv, Rscratch1);

  // Still no call from handle => We call the method handle interpreter here.
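  // (Note: Rmethod holds the resolved MethodHandle adapter/intrinsic method
  // from the constant pool cache; its entry performs the actual handle
  // dispatch, which is why the receiver is profiled as a final call above.)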
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

// =============================================================================
// Allocation

// Puts allocated obj ref onto the expression stack.
void TemplateTable::_new() {
  transition(vtos, atos);

  Label Lslow_case,
        Ldone,
        Linitialize_header,
        Lallocate_shared,
        Linitialize_object; // Including clearing the fields.

  const Register RallocatedObject = R17_tos,
                 RinstanceKlass   = R9_ARG7,
                 Rscratch         = R11_scratch1,
                 Roffset          = R8_ARG6,
                 Rinstance_size   = Roffset,
                 Rcpool           = R4_ARG2,
                 Rtags            = R3_ARG1,
                 Rindex           = R5_ARG3;

  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();

  // --------------------------------------------------------------------------
  // Check if fast case is possible.

  // Load pointers to const pool and const pool's tags array.
  __ get_cpool_and_tags(Rcpool, Rtags);
  // Load index of constant pool entry.
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  if (UseTLAB) {
    // Make sure the class we're about to instantiate has been resolved.
    // This is done before loading the InstanceKlass to be consistent with the order
    // in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
    __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
    __ lbzx(Rtags, Rindex, Rtags);

    __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
    __ bne(CCR0, Lslow_case);

    // Get InstanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
    __ sldi(Roffset, Rindex, LogBytesPerWord);
    __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
    __ isync(); // Order load of instance Klass wrt. tags.
    __ ldx(RinstanceKlass, Roffset, Rscratch);

    // Make sure klass is fully initialized and get instance_size.
    __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
    __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);

    __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
    // Make sure the klass has no finalizer and is not abstract, an interface, or java/lang/Class.
    __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?

    __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized?
    __ beq(CCR0, Lslow_case);

    // --------------------------------------------------------------------------
    // Fast case:
    // Allocate the instance.
    // 1) Try to allocate in the TLAB.
    // 2) If that fails and the TLAB is not full enough to discard, allocate in the shared Eden.
    // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).

    Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
    Register RnewTopValue = R6_ARG4;
    Register RendValue    = R7_ARG5;

    // Check if we can allocate in the TLAB.
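    // TLAB bump-pointer allocation, roughly:
    //   new_top = tlab.top + instance_size;
    //   if (new_top > tlab.end) goto shared_eden_or_slow_path;
    //   tlab.top = new_top;   // the object starts at the old top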
    __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
    __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);

    __ add(RnewTopValue, Rinstance_size, RoldTopValue);

    // If there is enough space, we do not CAS and do not clear.
    __ cmpld(CCR0, RnewTopValue, RendValue);
    __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);

    __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);

    if (ZeroTLAB) {
      // The fields have already been cleared.
      __ b(Linitialize_header);
    } else {
      // Initialize both the header and fields.
      __ b(Linitialize_object);
    }

    // Fall through: TLAB was too small.
    if (allow_shared_alloc) {
      Register RtlabWasteLimitValue = R10_ARG8;
      Register RfreeValue = RnewTopValue;

      __ bind(Lallocate_shared);
      // Check if the TLAB should be discarded (refill_waste_limit >= free).
      __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
      __ subf(RfreeValue, RoldTopValue, RendValue);
      __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
      __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
      __ bge(CCR0, Lslow_case);

      // Increment waste limit to prevent getting stuck on this slow path.
      __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
      __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
    }
    // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
  }
  // else: Always go the slow path.

  // --------------------------------------------------------------------------
  // slow case
  __ bind(Lslow_case);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);

  if (UseTLAB) {
    __ b(Ldone);
    // --------------------------------------------------------------------------
    // Init1: Zero out newly allocated memory.

    if (!ZeroTLAB || allow_shared_alloc) {
      // Clear object fields.
      __ bind(Linitialize_object);

      // Initialize remaining object fields.
      Register Rbase = Rtags;
      __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
      __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
      __ srdi(Rinstance_size, Rinstance_size, 3);

      // Clear out the object, skipping the header. Also takes care of the zero-length case.
      __ clear_memory_doubleword(Rbase, Rinstance_size);
      // fallthru: __ b(Linitialize_header);
    }

    // --------------------------------------------------------------------------
    // Init2: Initialize the header: mark, klass
    __ bind(Linitialize_header);

    // Init mark.
    if (UseBiasedLocking) {
      __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
    } else {
      __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
    }
    __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);

    // Init klass.
    __ store_klass_gap(RallocatedObject);
    __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)

    // Check and trigger dtrace event.
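    // (SkipIfEqualZero loads the DTraceAllocProbes byte flag and branches
    // over the scope below when it is zero, so a disabled probe costs only
    // a load and a branch.)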
    {
      SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
      __ push(atos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
      __ pop(atos);
    }
  }

  // continue
  __ bind(Ldone);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::newarray() {
  transition(itos, atos);

  __ lbz(R4, 1, R14_bcp);
  __ extsw(R5, R17_tos);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  __ get_constant_pool(R4);
  __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
  __ extsw(R6, R17_tos); // size
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

// Allocate a multi dimensional array
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register Rptr = R31; // Needs to survive C call.

  // Compute ndims * wordSize, the size of the dimension arguments on the stack.
  __ lbz(Rptr, 3, R14_bcp);
  __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
  // Esp points past last_dim, so set R4 to the first_dim address.
  __ add(R4, Rptr, R15_esp);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
  // Pop all dimensions off the stack.
  __ add(R15_esp, Rptr, R15_esp);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);

  __ verify_oop(R17_tos);
  __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
  __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
}

// ============================================================================
// Typechecks

void TemplateTable::checkcast() {
  transition(atos, atos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset         = R6_ARG4,
           RobjKlass       = R4_ARG2,
           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
           Rcpool          = R11_scratch1,
           Rtags           = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" the bytecode (resolve the class).
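  // (quicken_io_cc resolves the class and updates the constant pool tag to
  // JVM_CONSTANT_Class, so later executions take the Lquicked fast path.
  // The TOS oop is saved across the call because the runtime call may GC.)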
  __ push_ptr(); // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();  // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the checkcast.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone if RobjKlass is a subtype; falls through otherwise.
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);

  // Not a subtype; so must throw exception.
  // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}

// Output:
// - tos == 0: Obj was null or not an instance of class.
// - tos == 1: Obj was an instance of class.
void TemplateTable::instanceof() {
  transition(atos, itos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset         = R6_ARG4,
           RobjKlass       = R4_ARG2,
           RspecifiedKlass = R5_ARG3,
           Rcpool          = R11_scratch1,
           Rtags           = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" the bytecode (resolve the class).
  __ push_ptr(); // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();  // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the subtype check.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone (with tos == 1) if RobjKlass is a subtype;
  // falls through (tos == 0) otherwise.
  __ li(R17_tos, 1);
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
  __ li(R17_tos, 0);

  if (ProfileInterpreter) {
    __ b(Ldone);
  }

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.
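  // (No explicit result is needed on this path: R17_tos still holds the null
  // oop, i.e. 0, which is exactly the 'false' result after the atos -> itos
  // transition.)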

  __ align(32, 12);
  __ bind(Ldone);
}

// =============================================================================
// Breakpoints

void TemplateTable::_breakpoint() {
  transition(vtos, vtos);

  // Get the unpatched byte code.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
  __ mr(R31, R3_RET);

  // Post the breakpoint event.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);

  // Complete the execution of the original bytecode.
  __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
}

// =============================================================================
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // Exception oop is in tos.
  __ verify_oop(R17_tos);

  __ null_check_throw(R17_tos, -1, R11_scratch1);

  // The throw-exception interpreter entry expects the exception oop to be in R3.
  __ mr(R3_RET, R17_tos);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
  __ mtctr(R11_scratch1);
  __ bctr();
}

// =============================================================================
// Synchronization
// Searches the basic object lock list on the stack for a free slot
// and uses it to lock the object in tos.
//
// Recursive locking is enabled by exiting the search if the same
// object is already found in the list. Thus, a new BasicObjectLock
// is allocated "higher up" in the stack and is therefore found first
// at the next monitor exit.
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  __ verify_oop(R17_tos);

  Register Rcurrent_monitor  = R11_scratch1,
           Rcurrent_obj      = R12_scratch2,
           Robj_to_lock      = R17_tos,
           Rscratch1         = R3_ARG1,
           Rscratch2         = R4_ARG2,
           Rscratch3         = R5_ARG3,
           Rcurrent_obj_addr = R6_ARG4;

  // ------------------------------------------------------------------------------
  // Null pointer exception.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until monitorenter returns true).

  // ------------------------------------------------------------------------------
  // Find a free slot in the monitor block.
  Label Lfound, Lexit, Lallocate_new;
  ConditionRegister found_free_slot = CCR0,
                    found_same_obj  = CCR1,
                    reached_limit   = CCR6;
  {
    Label Lloop;
    Register Rlimit = Rcurrent_monitor;

    // Set up search loop - start with topmost monitor.
    __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);

    __ ld(Rlimit, 0, R1_SP);
    __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base

    // Check if any slot is present => shortcut to allocation if not.
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ bgt(reached_limit, Lallocate_new);

    // Pre-load topmost slot.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // The search loop.
    __ bind(Lloop);
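    // Three independent compares target three different condition registers
    // (CCR0, CCR1, CCR6), so they can issue in parallel before the dependent
    // branch chain below.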
    // Found free slot?
    __ cmpdi(found_free_slot, Rcurrent_obj, 0);
    // Is this entry for the same obj? If so, stop the search and take the found
    // free slot or allocate a new one to enable recursive locking.
    __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ beq(found_free_slot, Lexit);
    __ beq(found_same_obj, Lallocate_new);
    __ bgt(reached_limit, Lallocate_new);
    // Check if the last allocated BasicObjectLock has been reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ b(Lloop);
  }

  // ------------------------------------------------------------------------------
  // Check if we found a free slot.
  __ bind(Lexit);

  __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
  __ b(Lfound);

  // We didn't find a free BasicObjectLock => allocate one.
  __ align(32, 12);
  __ bind(Lallocate_new);
  __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
  __ mr(Rcurrent_monitor, R26_monitor);
  __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());

  // ------------------------------------------------------------------------------
  // We now have a slot to lock.
  __ bind(Lfound);

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ addi(R14_bcp, R14_bcp, 1);

  __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
  __ lock_object(Rcurrent_monitor, Robj_to_lock);

  // Check if there's enough space on the stack for the monitors after locking.
  Label Lskip_stack_check;
  // Optimization: If the monitors stack section is less than a standard page size (4K), don't run
  // the stack check. There should be enough shadow pages to fit that in.
  __ ld(Rscratch3, 0, R1_SP);
  __ sub(Rscratch3, Rscratch3, R26_monitor);
  __ cmpdi(CCR0, Rscratch3, 4*K);
  __ blt(CCR0, Lskip_stack_check);

  DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
  __ li(Rscratch1, 0);
  __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);

  __ align(32, 12);
  __ bind(Lskip_stack_check);

  // The bcp has already been incremented. Just need to dispatch to the next instruction.
  __ dispatch_next(vtos);
}

void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(R17_tos);

  Register Rcurrent_monitor  = R11_scratch1,
           Rcurrent_obj      = R12_scratch2,
           Robj_to_lock      = R17_tos,
           Rcurrent_obj_addr = R3_ARG1,
           Rlimit            = R4_ARG2;
  Label Lfound, Lillegal_monitor_state;

  // Check corner case: unbalanced monitorenter / monitorexit.
  __ ld(Rlimit, 0, R1_SP);
  __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
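
  // (The interpreter's BasicObjectLocks live in the frame between the ijava
  // state and the expression stack. R26_monitor points to the most recently
  // allocated slot; the area grows towards lower addresses, so the newest
  // monitor has the lowest address.)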
  // Null pointer check.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  __ cmpld(CCR0, R26_monitor, Rlimit);
  __ bgt(CCR0, Lillegal_monitor_state);

  // Find the corresponding slot in the monitors stack section.
  {
    Label Lloop;

    // Start with topmost monitor.
    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
    __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    __ bind(Lloop);
    // Is this entry for the same obj?
    __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
    __ beq(CCR0, Lfound);

    // Check if the last allocated BasicObjectLock has been reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ ble(CCR0, Lloop);
  }

  // Fell through without finding the BasicObjectLock => throw IllegalMonitorStateException.
  __ bind(Lillegal_monitor_state);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  __ align(32, 12);
  __ bind(Lfound);
  __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
          -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ unlock_object(Rcurrent_monitor);
}

// ============================================================================
// Wide bytecodes

// Wide instructions. Simply redirect to the wide entry point for that instruction.
void TemplateTable::wide() {
  transition(vtos, vtos);

  const Register Rtable = R11_scratch1,
                 Rindex = R12_scratch2,
                 Rtmp   = R0;

  __ lbz(Rindex, 1, R14_bcp);

  __ load_dispatch_table(Rtable, Interpreter::_wentry_point);

  __ slwi(Rindex, Rindex, LogBytesPerWord);
  __ ldx(Rtmp, Rtable, Rindex);
  __ mtctr(Rtmp);
  __ bctr();
  // Note: the bcp increment step is part of the individual wide bytecode implementations.
}
#endif // !CC_INTERP