/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants is possible at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp1, Rtmp2, Rtmp3
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,         // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should better stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}
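
// Typical use (a sketch; see aastore below for the real call sites): store an
// oop element of an object array and mark its card, e.g.
//   do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT),
//                R17_tos, Rtmp1, Rtmp2, Rtmp3, _bs->kind(),
//                true /* precise */, false /* check_null */);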

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
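      // A worked example, assuming f1_byte == 1 (see templateTable.hpp): the
      // put_code then lives in byte 2, counting from the least significant
      // byte of the 8-byte indices slot. That is byte offset 1 + byte_no == 2
      // on little-endian and 7 - (1 + byte_no) == 5 on big-endian, which is
      // exactly what the two lbz variants below compute.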
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}
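
// A note on the pattern above (a sketch of the contract as used here; see
// macroAssembler_ppc for the authoritative one): with the last argument true,
// load_const_optimized materializes only the upper part of the constant's
// address in R11_scratch1 and returns the remaining low 16 bits, which the
// following lfs folds into its displacement.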

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ isync(); // Order load of constant wrt. tags.
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ isync(); // Order load of constant wrt. tags.
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}
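
// The isync instructions in ldc above implement an acquire: on PPC, a compare
// on the loaded value, a conditional branch depending on it, and isync order
// subsequent loads after the compared load, so the constant is read only
// after its tag (the usual cmp-branch-isync idiom).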

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch);
  __ cmpdi(CCR0, R17_tos, 0);
  __ bne(CCR0, resolved);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
  __ call_VM(R17_tos, entry, R3_ARG1);

  __ align(32, 12);
  __ bind(resolved);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ isync(); // Order load of constant wrt. tags.
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ isync(); // Order load of constant wrt. tags.
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload pair into fast_iload2,
  //         iload,caload pair into fast_icaload.
  if (RewriteFrequentPairs) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // If _iload, wait to rewrite to fast_iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means the
    // following iload has already been rewritten, i.e. this is the first
    // iload of an iload pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}
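
// A worked example of the rewriting above, assuming the bytecode stream
// "iload a; iload b; caload": the first iload sees another iload and is left
// alone; the second sees caload and becomes fast_icaload; when the first
// iload executes again it now sees fast_icaload (neither iload nor caload)
// and is rewritten to fast_iload.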

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable type long from locals area to TOS cache register.
// Local index resides in bytecodestream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because Lbcp points to wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}
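
// index_check (see interp_masm_ppc) pops the array reference off the
// expression stack, performs the bounds check against the array length
// (whose load also serves as the implicit null check), and leaves the scaled
// element address in its last argument, so each *aload here is a single load
// from the T_<type> base offset.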

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}
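
// Element size in aaload (and aastore below): compressed oops occupy 4 bytes
// per element (log2 == 2), otherwise an element is a full word
// (LogBytesPerWord == 3 on ppc64).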

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}
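
// Longs and doubles occupy two local slots; the n + 1 in lload(int n) and
// dload(int n) above (and in lstore/dstore below) addresses the second slot,
// where HotSpot keeps the actual 8-byte value.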

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}
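
// astore transitions vtos -> vtos and verifies via
// verify_oop_or_return_address because its operand may be a returnAddress
// pushed by jsr rather than an oop, so it must not be popped as atos.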

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack and...
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31; // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}
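
// The expression stack grows towards lower addresses, so popping is a
// positive addi on R15_esp, and slot k (1-based, counted from the top) lives
// at k * Interpreter::stackElementSize above the current esp -- the invariant
// the dup/swap templates below rely on.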

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);      // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);  // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // tos      = number of bits to shift
  // Rscratch = value to shift
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:  __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}
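
// For the shift cases above, rldicl(R17_tos, R17_tos, 0, 64-5) keeps only the
// low 5 bits of the shift count -- the "& 0x1f" masking the JVM spec requires
// for 32-bit shifts. The 64-bit shifts in lshl/lshr/lushr below keep 6 bits
// (64-6) for the same reason.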

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}
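
// irem computes the remainder as dividend - (dividend / divisor) * divisor:
// the divisor is saved in R12_scratch2, idiv() leaves the dividend in
// R11_scratch1 and the quotient in R17_tos, and mullw/subf finish the job
// (lrem below follows the same scheme with 64-bit instructions).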

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);           // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}
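
// load_local_int returns the local's address in its last register argument
// (Rindex here, as noted above), which is why iinc and wide_iinc can store
// the incremented value back with a plain stw through that register.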

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}
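
// Two details of convert() worth noting: _i2d deliberately falls through to
// _l2d after sign-extending the int, reusing push_l_pop_d + fcfid; and the
// _d2i/_f2i/_d2l/_f2l cases first compare the value with itself, since fcmpu
// flags an unordered result (NaN) in CR0, in which case 0 is returned as the
// JVM spec requires.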

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}
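
// Sketch of the mfcr trick above: cmpd sets the LT/GT bits of CR0, which mfcr
// places at the top of the low 32-bit word of R17_tos. srwi by 30 brings them
// down to the low two bits (2 for "<", 1 for ">", 0 for "="); srawi by 31
// smears the LT bit into 0 or -1; or-ing the two yields the required
// -1 / 0 / +1 (for "<", -1 | 2 is still -1). float_cmp below reuses the same
// sequence.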

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in R17_tos.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }
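
  // (jsr pushes a returnAddress, i.e. a bci relative to ConstMethod::codes,
  // rather than a machine address; ret and wide_ret below turn it back into
  // a bcp by re-adding Method::const_offset and ConstMethod::codes_offset.)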

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    //__ unimplemented("branch invocation counter");

    Label Lforward;
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ load_const_optimized(Rscratch3, mask, R0);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
        __ b(Loverflow);
      }

      // If there's no MDO, increment counter in method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ load_const_optimized(Rscratch3, mask, R0);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters);
      __ and_(Rscratch3, Rscratch2, Rscratch3);
      __ bne(CCR0, Lforward);

      __ bind(Loverflow);

      // Notify point for loop, pass branch bytecode.
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);

      // Was an OSR adapter generated?
      // R3_RET = osr nmethod
      __ cmpdi(CCR0, R3_RET, 0);
      __ beq(CCR0, Lforward);

      // Has the nmethod been invalidated already?
      __ lwz(R0, nmethod::entry_bci_offset(), R3_RET);
      __ cmpwi(CCR0, R0, InvalidOSREntryBci);
      __ beq(CCR0, Lforward);

      // Migrate the interpreter frame off of the stack.
      // We can use all registers because we will not return to interpreter from this point.

      // Save nmethod.
      const Register osr_nmethod = R31;
      __ mr(osr_nmethod, R3_RET);
      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
      __ reset_last_Java_frame();
      // OSR buffer is in ARG1.

      // Remove the interpreter frame.
      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

      // Jump to the osr code.
      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
      __ mtlr(R0);
      __ mtctr(R11_scratch1);
      __ bctr();

    } else {

      const Register invoke_ctr = Rscratch1;
      // Update Backedge branch separately from invocations.
      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);

      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
        }
      }
    }

    __ bind(Lforward);

  } else {
    // Bump bytecode pointer by displacement (take the branch).
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
  }
  // Continue with bytecode @ target.
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only.
  __ dispatch_next(vtos);
}
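
// OSR hand-off in the code above: frequency_counter_overflow returns the osr
// nmethod (or NULL) in R3_RET; after OSR_migration_begin the OSR buffer is in
// R3_ARG1, the interpreter frame is merged away, and control transfers to the
// nmethod's OSR entry point via mtctr/bctr.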
// Compare integer values and fall through if CC holds, branch away otherwise.
//
// Interface:
//  - Rfirst: First operand  (older stack value)
//  - tos:    Second operand (younger stack value)
void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_i(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_ptr(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
}

void TemplateTable::ret() {
  locals_index(R11_scratch1);
  __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);

  __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);

  __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
  __ add(R11_scratch1, R17_tos, R11_scratch1);
  __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);

  const Register Rindex    = R3_ARG1,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, R17_tos, Rindex);
  __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2);
  // Tos now contains the bci, compute the bcp from that.
  __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
  __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
  __ add(R14_bcp, Rscratch1, Rscratch2);
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch() {
  transition(itos, vtos);

  Label Ldispatch, Ldefault_case;
  Register Rlow_byte        = R3_ARG1,
           Rindex           = Rlow_byte,
           Rhigh_byte       = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset
           Rscratch1        = R11_scratch1,
           Rscratch2        = R12_scratch2,
           Roffset          = R6_ARG4;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Load lo & hi.
  __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);

  // Check for default case (=index outside [low,high]).
  __ cmpw(CCR0, R17_tos, Rlow_byte);
  __ cmpw(CCR1, R17_tos, Rhigh_byte);
  __ blt(CCR0, Ldefault_case);
  __ bgt(CCR1, Ldefault_case);

  // Lookup dispatch offset.
  __ sub(Rindex, R17_tos, Rlow_byte);
  __ extsw(Rindex, Rindex);
  __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
  __ sldi(Rindex, Rindex, LogBytesPerInt);
  __ addi(Rindex, Rindex, 3 * BytesPerInt);
#if defined(VM_LITTLE_ENDIAN)
  __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
  __ extsw(Roffset, Roffset);
#else
  __ lwax(Roffset, Rdef_offset_addr, Rindex);
#endif
  __ b(Ldispatch);

  __ bind(Ldefault_case);
  __ profile_switch_default(Rhigh_byte, Rscratch1);
  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);

  __ bind(Ldispatch);

  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

// Table switch using linear search through cases.
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is big-endian format here.
void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);

  Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
  Register Rcount           = R3_ARG1,
           Rcurrent_pair    = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
           Roffset          = R31,     // Might need to survive C call.
           Rvalue           = R12_scratch2,
           Rscratch         = R11_scratch1,
           Rcmp_value       = R17_tos;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Setup loop counter and limit.
  __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.

  __ mtctr(Rcount);
  __ cmpwi(CCR0, Rcount, 0);
  __ bne(CCR0, Lloop_entry);

  // Default case
  __ bind(Ldefault_case);
  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
  if (ProfileInterpreter) {
    __ profile_switch_default(Rdef_offset_addr, Rcount /* scratch */);
  }
  __ b(Lcontinue_execution);

  // Next iteration
  __ bind(Lsearch_loop);
  __ bdz(Ldefault_case);
  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
  __ bind(Lloop_entry);
  __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
  __ cmpw(CCR0, Rvalue, Rcmp_value);
  __ bne(CCR0, Lsearch_loop);

  // Found, load offset.
  __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
  // Calculate case index and profile
  __ mfctr(Rcurrent_pair);
  if (ProfileInterpreter) {
    __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
    __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr /*scratch*/, Rscratch);
  }

  __ bind(Lcontinue_execution);
  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos);
}
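// Illustrative layout sketch (hypothetical values, not taken from a real class
// file): a lookupswitch with the two cases "10 -> +28, 50 -> +44" and default
// offset +12, whose opcode sits three bytes before a 4-byte boundary, appears
// in the bytecode stream as
//
//   0xab | pad(3) | 00 00 00 0c | 00 00 00 02 | 00 00 00 0a | 00 00 00 1c | 00 00 00 32 | 00 00 00 2c
//          align    default=12    count=2       value=10      offset=28     value=50      offset=44
//
// All values are big-endian u4, and all offsets are relative to the address of
// the lookupswitch opcode itself. Both search variants below walk this layout.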
// Table switch using binary search (value/offset pairs are ordered).
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is big-endian format here. So on little-endian machines, we
// have to byte-reverse the offset, the count and the compare value.
void TemplateTable::fast_binaryswitch() {

  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  const Register Rkey     = R17_tos; // already set (tosca)
  const Register Rarray   = R3_ARG1;
  const Register Ri       = R4_ARG2;
  const Register Rj       = R5_ARG3;
  const Register Rh       = R6_ARG4;
  const Register Rscratch = R11_scratch1;

  const int log_entry_size = 3;
  const int entry_size     = 1 << log_entry_size;

  Label found;

  // Find array start.
  __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
  __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));

  // Initialize i & j.
  __ li(Ri, 0);
  __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);

  // And start.
  Label entry;
  __ b(entry);

  // binary search loop
  { Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ srdi(Rh, Rh, 1);
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sldi(Rscratch, Rh, log_entry_size);
#if defined(VM_LITTLE_ENDIAN)
    __ lwbrx(Rscratch, Rscratch, Rarray);
#else
    __ lwzx(Rscratch, Rscratch, Rarray);
#endif

    // if (key < current value)
    //   Rh = Rj
    // else
    //   Rh = Ri
    Label Lgreater;
    __ cmpw(CCR0, Rkey, Rscratch);
    __ bge(CCR0, Lgreater);
    __ mr(Rj, Rh);
    __ b(entry);
    __ bind(Lgreater);
    __ mr(Ri, Rh);

    // while (i+1 < j)
    __ bind(entry);
    __ addi(Rscratch, Ri, 1);
    __ cmpw(CCR0, Rscratch, Rj);
    __ add(Rh, Ri, Rj); // Start h = (i + j) >> 1; the shift happens at the loop head.

    __ blt(CCR0, loop);
  }

  // End of binary search, result index is i (must check again!).
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mr(Rh, Ri); // Save index in i for profiling.
  }
  // Ri = value offset
  __ sldi(Ri, Ri, log_entry_size);
  __ add(Ri, Ri, Rarray);
  __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);

  Label not_found;
  // Ri = offset offset
  __ cmpw(CCR0, Rkey, Rscratch);
  __ beq(CCR0, not_found);
  // Entry not found -> j = default offset.
  __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ b(default_case);

  __ bind(not_found);
  // Entry found -> j = offset.
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);

  if (ProfileInterpreter) {
    __ b(continue_execution);
  }

  __ bind(default_case); // fall through (if not profiling)
  __ profile_switch_default(Ri, Rscratch);

  __ bind(continue_execution);

  __ extsw(Rj, Rj);
  __ add(R14_bcp, Rj, R14_bcp);
  __ dispatch_next(vtos);
}

void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {

    Register Rscratch     = R11_scratch1,
             Rklass       = R12_scratch2,
             Rklass_flags = Rklass;
    Label Lskip_register_finalizer;

    // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
    assert(state == vtos, "only valid state");
    __ ld(R17_tos, 0, R18_locals);

    // Load klass of this obj.
    __ load_klass(Rklass, R17_tos);
    __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
    __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
    __ bfalse(CCR0, Lskip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);

    __ align(32, 12);
    __ bind(Lskip_register_finalizer);
  }

  // Move the result value into the correct register and remove the memory stack frame.
  __ remove_activation(state, /* throw_monitor_exception */ true);
  // Restoration of lr done by remove_activation.
  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R3_RET, R17_tos); break;
    case ftos:
    case dtos: __ fmr(F1_RET, F15_ftos); break;
    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
               // to get visible before the reference to the object gets stored anywhere.
               __ membar(Assembler::StoreStore); break;
    default  : ShouldNotReachHere();
  }
  __ blr();
}
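// Illustrative scenario (hypothetical Java code) for the StoreStore barrier in
// the vtos return path above: when a constructor like
//
//   class Point { final int x; Point(int x) { this.x = x; } }
//
// returns, the store to this.x must become visible before any subsequent store
// that publishes the new Point reference (e.g. "shared = new Point(1);" in the
// caller). The StoreStore membar at the constructor's return orders exactly
// these two stores; it is deliberately not a full fence.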
// ============================================================================
// Constant pool cache access
//
// Memory ordering:
//
// As in the C++ interpreter, we load the fields
//   - _indices
//   - _f12_oop
// with acquire semantics, because they are inspected to decide whether the
// cache entry is already resolved. We don't want later loads to float above
// that check.
// See also comments in ConstantPoolCacheEntry::bytecode_1(),
// ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();

// Call into the VM if the call site is not yet resolved.
//
// Input regs:
//   - None, all passed regs are outputs.
//
// Returns:
//   - Rcache:  The const pool cache entry that contains the resolved result.
//   - Rresult: Either noreg or output for f1/f2.
//
// Kills:
//   - Rscratch
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {

  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  Label Lresolved, Ldone;

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  // We are resolved if the indices offset contains the current bytecode.
#if defined(VM_LITTLE_ENDIAN)
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
#else
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
#endif
  // Acquire by cmp-br-isync (see below).
  __ cmpdi(CCR0, Rscratch, (int)bytecode());
  __ beq(CCR0, Lresolved);

  address entry = NULL;
  switch (bytecode()) {
    case Bytecodes::_getstatic      : // fall through
    case Bytecodes::_putstatic      : // fall through
    case Bytecodes::_getfield       : // fall through
    case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
    case Bytecodes::_invokevirtual  : // fall through
    case Bytecodes::_invokespecial  : // fall through
    case Bytecodes::_invokestatic   : // fall through
    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
    case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
    default                         : ShouldNotReachHere(); break;
  }
  __ li(R4_ARG2, (int)bytecode());
  __ call_VM(noreg, entry, R4_ARG2, true);

  // Update registers with resolved info.
  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  __ b(Ldone);

  __ bind(Lresolved);
  __ isync(); // Order load wrt. succeeding loads.
  __ bind(Ldone);
}

// Load the constant pool cache entry at field accesses into registers.
// The Rcache and Rindex registers must be set before call.
// Input:
//   - Rcache, Rindex
// Output:
//   - Robj, Roffset, Rflags
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register Rindex /* unused on PPC64 */,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static = false) {
  assert_different_registers(Rcache, Rflags, Roffset);
  // assert(Rindex == noreg, "parameter not used on PPC64");

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
  if (is_static) {
    __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
    __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
    // Acquire not needed here. Following access has an address dependency on this value.
  }
}
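// Sketch of the acquire idiom used by resolve_cache_and_index above (PPC64 has
// no cheap load-acquire instruction, so a control dependency plus isync is used):
//
//   lbz    Rscratch, <indices byte>(Rcache)  // load the resolved-bytecode byte
//   cmpdi  CCR0, Rscratch, bytecode()
//   beq    CCR0, Lresolved                   // branch depends on the load
//   ...                                      // slow path: call the VM to resolve
//  Lresolved:
//   isync                                    // discard speculation: no later load
//                                            // can complete before the lbz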
// Load the constant pool cache entry at invokes into registers.
// Resolve if necessary.
//
// Input Registers:
//   - None, bcp is used, though.
//
// Return registers:
//   - Rmethod       (f1 field or f2 if invokevirtual)
//   - Ritable_index (f2 field)
//   - Rflags        (flags field)
//
// Kills:
//   - R21
//
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register Rmethod,
                                               Register Ritable_index,
                                               Register Rflags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  // Determine constant pool cache field offsets.
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
  // Access constant pool cache fields.
  const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());

  Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.

  if (is_invokevfinal) {
    assert(Ritable_index == noreg, "register not used");
    // Already resolved.
    __ get_cache_and_index_at_bcp(Rcache, 1);
  } else {
    resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
  }

  __ ld(Rmethod, method_offset, Rcache);
  __ ld(Rflags, flags_offset, Rcache);

  if (Ritable_index != noreg) {
    __ ld(Ritable_index, index_offset, Rcache);
  }
}

// ============================================================================
// Field access

// Volatile variables demand their effects be made known to all CPUs in order.
// Store buffers on most chips allow reads & writes to reorder; the JMM's
// ReadAfterWrite.java test fails in -Xint mode without some kind of memory
// barrier (i.e., it's not sufficient that the interpreter does not reorder
// volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other. ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.
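// Summary sketch of the placement used in the templates below (illustrative;
// the emitted code is authoritative):
//
//   If support_IRIW_for_not_multiple_copy_atomic_cpu:
//     volatile load : sync; load; acquire (twi/isync)   // leading sync also covers (1)
//     volatile store: lwsync; store
//   Otherwise:
//     volatile load : load; acquire (twi/isync)
//     volatile store: lwsync; store; sync               // trailing sync covers (1)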
// The registers cache and index are expected to be set before the call.
// Correct values of the cache and index registers are preserved.
// Kills:
//   Rcache (if has_tos)
//   Rscratch
void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {

  assert_different_registers(Rcache, Rscratch);

  if (JvmtiExport::can_post_field_access()) {
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    Label Lno_field_access_post;

    // Check if posting of field access events is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_access_post);

    // Post access enabled - do it!
    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      __ li(R17_tos, 0);
    } else {
      if (has_tos) {
        // The fast bytecode versions have obj ptr in register.
        // Thus, save object pointer before call_VM() clobbers it:
        // put object on tos where GC wants it.
        __ push_ptr(R17_tos);
      } else {
        // Load top of stack (do not pop the value off the stack).
        __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
      }
      __ verify_oop(R17_tos);
    }
    // tos:   object pointer or NULL if static
    // cache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
    if (!is_static && has_tos) {
      // Restore object pointer.
      __ pop_ptr(R17_tos);
      __ verify_oop(R17_tos);
    } else {
      // Cache is still needed to get class or obj.
      __ get_cache_and_index_at_bcp(Rcache, 1);
    }

    __ align(32, 12);
    __ bind(Lno_field_access_post);
  }
}

// kills R11_scratch1
void TemplateTable::pop_and_check_object(Register Roop) {
  Register Rtmp = R11_scratch1;

  assert_different_registers(Rtmp, Roop);
  __ pop_ptr(Roop);
  // For field access must check obj.
  __ null_check_throw(Roop, -1, Rtmp);
  __ verify_oop(Roop);
}

// PPC64: implement volatile loads as fence-load-acquire.
void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  transition(vtos, vtos);

  Label Lacquire, Lisync;

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R22_tmp2,
                 Roffset       = R23_tmp3,
                 Rflags        = R31,
                 Rbtable       = R5_ARG3,
                 Rbc           = R6_ARG4,
                 Rscratch      = R12_scratch2;

  static address field_branch_table[number_of_states],
                 static_branch_table[number_of_states];

  address* branch_table = is_static ? static_branch_table : field_branch_table;

  // Get field offset.
  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, is_static, false);

  // Load after possible GC.
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  // Note: sync is needed before volatile load on PPC64.

  // Check field type.
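  // Layout sketch of the flag bits used here (per ConstantPoolCacheEntry in
  // cpCache.hpp; shown for orientation only):
  //
  //   bits 28..31: tos_state (field type, indexes the branch table)
  //   bit  21    : is_volatile
  //   bits 0..7  : parameter size (invokes only)
  //
  // The rldicl below rotates tos_state down to bits 0..3; e.g. for an int
  // field, Rflags then contains the value itos.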
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  __ cmpwi(CCR6, Rscratch, 1); // Volatile?
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  }
  __ ldx(Rbtable, Rbtable, Rflags);

  // Get the obj from stack.
  if (!is_static) {
    pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
  } else {
    __ verify_oop(Rclass_or_obj);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  }
  __ mtctr(Rbtable);
  __ bctr();

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x654);

  // __ bind(Lvtos);
  address pc_before_fence = __ pc();
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  assert(branch_table[vtos] == 0, "can't compute twice");
  branch_table[vtos] = __ pc(); // non-volatile_entry point
  __ stop("vtos unexpected", 0x655);
#endif

  __ align(32, 28, 28); // Align load.
  // __ bind(Ldtos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[dtos] == 0, "can't compute twice");
  branch_table[dtos] = __ pc(); // non-volatile_entry point
  __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
  __ push(dtos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
  {
    Label acquire_double;
    __ beq(CCR6, acquire_double); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ bind(acquire_double);
    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NaN.
  }

  __ align(32, 28, 28); // Align load.
  // __ bind(Lftos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ftos] == 0, "can't compute twice");
  branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
  __ push(ftos);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }
  {
    Label acquire_float;
    __ beq(CCR6, acquire_float); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ bind(acquire_float);
    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NaN.
  }
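  // Layout sketch of one branch table entry as emitted above and below
  // (illustrative):
  //
  //   branch_table[tos] - 4:  fence      <- volatile entry point (IRIW case)
  //   branch_table[tos]    :  load, ...  <- non-volatile entry point
  //
  // The dispatch code loads branch_table[tos] and, if the volatile bit was set
  // (and support_IRIW_for_not_multiple_copy_atomic_cpu), subtracts one
  // instruction word so that execution enters at the leading fence.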
  __ align(32, 28, 28); // Align load.
  // __ bind(Litos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[itos] == 0, "can't compute twice");
  branch_table[itos] = __ pc(); // non-volatile_entry point
  __ lwax(R17_tos, Rclass_or_obj, Roffset);
  __ push(itos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lltos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ltos] == 0, "can't compute twice");
  branch_table[ltos] = __ pc(); // non-volatile_entry point
  __ ldx(R17_tos, Rclass_or_obj, Roffset);
  __ push(ltos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lbtos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[btos] == 0, "can't compute twice");
  branch_table[btos] = __ pc(); // non-volatile_entry point
  __ lbzx(R17_tos, Rclass_or_obj, Roffset);
  __ extsb(R17_tos, R17_tos);
  __ push(btos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lctos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ctos] == 0, "can't compute twice");
  branch_table[ctos] = __ pc(); // non-volatile_entry point
  __ lhzx(R17_tos, Rclass_or_obj, Roffset);
  __ push(ctos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lstos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[stos] == 0, "can't compute twice");
  branch_table[stos] = __ pc(); // non-volatile_entry point
  __ lhax(R17_tos, Rclass_or_obj, Roffset);
  __ push(stos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Latos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
  __ verify_oop(R17_tos);
  __ push(atos);
  //__ dcbt(R17_tos); // prefetch
  if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 12);
  __ bind(Lacquire);
  __ twi_0(R17_tos);
  __ bind(Lisync);
  __ isync(); // acquire

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "get initialization");
    //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

// The registers cache and index are expected to be set before the call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {

  assert_different_registers(Rcache, Rscratch, R6_ARG4);

  if (JvmtiExport::can_post_field_modification()) {
    Label Lno_field_mod_post;

    // Check if posting of field modification events is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_mod_post);

    // Do the post
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    const Register Robj = Rscratch;

    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ li(Robj, 0);
    } else {
      // In case of the fast versions, value lives in registers => put it back on tos.
      int offs = Interpreter::expr_offset_in_bytes(0);
      Register base = R15_esp;
      switch(bytecode()) {
        case Bytecodes::_fast_aputfield: __ push_ptr(); offs += Interpreter::stackElementSize; break;
        case Bytecodes::_fast_iputfield: // Fall through
        case Bytecodes::_fast_bputfield: // Fall through
        case Bytecodes::_fast_cputfield: // Fall through
        case Bytecodes::_fast_sputfield: __ push_i(); offs += Interpreter::stackElementSize; break;
        case Bytecodes::_fast_lputfield: __ push_l(); offs += 2 * Interpreter::stackElementSize; break;
        case Bytecodes::_fast_fputfield: __ push_f(); offs += Interpreter::stackElementSize; break;
        case Bytecodes::_fast_dputfield: __ push_d(); offs += 2 * Interpreter::stackElementSize; break;
        default: {
          offs = 0;
          base = Robj;
          const Register Rflags = Robj;
          Label is_one_slot;
          // Life is harder. The stack holds the value on top, followed by the
          // object. We don't know the size of the value, though; it could be
          // one or two words depending on its type. As a result, we must find
          // the type to determine where the object is.
          __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian
          __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

          __ cmpwi(CCR0, Rflags, ltos);
          __ cmpwi(CCR1, Rflags, dtos);
          __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
          __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
          __ beq(CCR0, is_one_slot);
          __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
          __ bind(is_one_slot);
          break;
        }
      }
      __ ld(Robj, offs, base);
      __ verify_oop(Robj);
    }

    __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0));
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4);
    __ get_cache_and_index_at_bcp(Rcache, 1);

    // In case of the fast versions, value lives in registers => put it back on tos.
    switch(bytecode()) {
      case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
      case Bytecodes::_fast_iputfield: // Fall through
      case Bytecodes::_fast_bputfield: // Fall through
      case Bytecodes::_fast_cputfield: // Fall through
      case Bytecodes::_fast_sputfield: __ pop_i(); break;
      case Bytecodes::_fast_lputfield: __ pop_l(); break;
      case Bytecodes::_fast_fputfield: __ pop_f(); break;
      case Bytecodes::_fast_dputfield: __ pop_d(); break;
      default: break; // Nothing to do.
    }

    __ align(32, 12);
    __ bind(Lno_field_mod_post);
  }
}

// PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  Label Lvolatile;

  const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
                 Rclass_or_obj = R31,      // Needs to survive C call.
                 Roffset       = R22_tmp2, // Needs to survive C call.
                 Rflags        = R3_ARG1,
                 Rbtable       = R4_ARG2,
                 Rscratch      = R11_scratch1,
                 Rscratch2     = R12_scratch2,
                 Rscratch3     = R6_ARG4,
                 Rbc           = Rscratch3;
  const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).

  static address field_branch_table[number_of_states],
                 static_branch_table[number_of_states];

  address* branch_table = is_static ? static_branch_table : field_branch_table;

  // Stack (grows up):
  //   value
  //   obj

  // Load the field offset.
  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
  jvmti_post_field_mod(Rcache, Rscratch, is_static);
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.

  // Check the field type.
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile?
  __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  __ ldx(Rbtable, Rbtable, Rflags);

  __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  __ mtctr(Rbtable);
  __ bctr();
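  // Unlike the get path, the put path subtracts unconditionally: every volatile
  // store needs the leading release (lwsync), whereas a volatile load only
  // needs the leading sync in the IRIW case. Illustrative entry layout:
  //
  //   branch_table[tos] - 4:  lwsync      <- volatile entry point
  //   branch_table[tos]    :  pop, store  <- non-volatile entry point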
#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x656);

  // __ bind(Lvtos);
  address pc_before_release = __ pc();
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  assert(branch_table[vtos] == 0, "can't compute twice");
  branch_table[vtos] = __ pc(); // non-volatile_entry point
  __ stop("vtos unexpected", 0x657);
#endif

  __ align(32, 28, 28); // Align pop.
  // __ bind(Ldtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[dtos] == 0, "can't compute twice");
  branch_table[dtos] = __ pc(); // non-volatile_entry point
  __ pop(dtos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lftos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ftos] == 0, "can't compute twice");
  branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ pop(ftos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Litos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[itos] == 0, "can't compute twice");
  branch_table[itos] = __ pc(); // non-volatile_entry point
  __ pop(itos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stwx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lltos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ltos] == 0, "can't compute twice");
  branch_table[ltos] = __ pc(); // non-volatile_entry point
  __ pop(ltos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stdx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lbtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[btos] == 0, "can't compute twice");
  branch_table[btos] = __ pc(); // non-volatile_entry point
  __ pop(btos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stbx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lctos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ctos] == 0, "can't compute twice");
  branch_table[ctos] = __ pc(); // non-volatile_entry point
  __ pop(ctos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lstos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[stos] == 0, "can't compute twice");
  branch_table[stos] = __ pc(); // non-volatile_entry point
  __ pop(stos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Latos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  __ pop(atos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ align(32, 12);
    __ bind(Lvolatile);
    __ fence();
  }
  // fallthru: __ b(Lexit);

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "put initialization");
    //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

// See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
void TemplateTable::jvmti_post_fast_field_mod() {
  __ should_not_reach_here();
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);

  const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
                 Rclass_or_obj = R31,      // Needs to survive C call.
                 Roffset       = R22_tmp2, // Needs to survive C call.
                 Rflags        = R3_ARG1,
                 Rscratch      = R11_scratch1,
                 Rscratch2     = R12_scratch2,
                 Rscratch3     = R4_ARG2;
  const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).

  // Constant pool already resolved => Load flags and offset of field.
  __ get_cache_and_index_at_bcp(Rcache, 1);
  jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // Get the obj and the final store addr.
  pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
  {
    Label LnotVolatile;
    __ beq(CCR0, LnotVolatile);
    __ release();
    __ align(32, 12);
    __ bind(LnotVolatile);
  }

  // Do the store and fencing.
  switch(bytecode()) {
    case Bytecodes::_fast_aputfield:
      // Store into the field.
      do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
      break;

    case Bytecodes::_fast_iputfield:
      __ stwx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_lputfield:
      __ stdx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_bputfield:
      __ stbx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_sputfield:
      __ sthx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_fputfield:
      __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_dputfield:
      __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
      break;

    default: ShouldNotReachHere();
  }

  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    Label LVolatile;
    __ beq(CR_is_vol, LVolatile);
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ align(32, 12);
    __ bind(LVolatile);
    __ fence();
  }
}

void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);

  Label LisVolatile;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R17_tos,
                 Roffset       = R22_tmp2,
                 Rflags        = R23_tmp3,
                 Rscratch      = R12_scratch2;

  // Constant pool already resolved. Get the field offset.
  __ get_cache_and_index_at_bcp(Rcache, 1);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, false, true);

  // Get the load address.
  __ null_check_throw(Rclass_or_obj, -1, Rscratch);

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  __ bne(CCR0, LisVolatile);

  switch(bytecode()) {
    case Bytecodes::_fast_agetfield:
    {
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_igetfield:
    {
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_lgetfield:
    {
      __ ldx(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ ldx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_bgetfield:
    {
      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
      __ extsb(R17_tos, R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ extsb(R17_tos, R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_cgetfield:
    {
      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_sgetfield:
    {
      __ lhax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lhax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_fgetfield:
    {
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    case Bytecodes::_fast_dgetfield:
    {
      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  Label LisVolatile;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R17_tos,
                 Roffset       = R22_tmp2,
                 Rflags        = R23_tmp3,
                 Rscratch      = R12_scratch2;

  __ ld(Rclass_or_obj, 0, R18_locals);

  // Constant pool already resolved. Get the field offset.
  __ get_cache_and_index_at_bcp(Rcache, 2);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // JVMTI support not needed, since we switch back to single bytecode as soon as the debugger attaches.

  // Needed to report exception at the correct bcp.
  __ addi(R14_bcp, R14_bcp, 1);

  // Get the load address.
  __ null_check_throw(Rclass_or_obj, -1, Rscratch);

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  __ bne(CCR0, LisVolatile);

  switch(state) {
    case atos:
    {
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case itos:
    {
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case ftos:
    {
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    default: ShouldNotReachHere();
  }
  __ addi(R14_bcp, R14_bcp, -1);
}
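// Note on fast_xaccess above: it backs the fused "aload_0; fast_?getfield"
// bytecodes, i.e. (illustrative Java-level example) the common "return
// this.field;" shape. The receiver is loaded directly from local 0, bcp is
// temporarily advanced by one byte so that an implicit null check reports the
// bcp of the embedded getfield, and the dispatch length (or the trailing addi)
// corrects it by -1 again.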
3171 Register Rscratch 3172 ) { 3173 // Determine flags. 3174 const Bytecodes::Code code = bytecode(); 3175 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 3176 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 3177 const bool is_invokehandle = code == Bytecodes::_invokehandle; 3178 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 3179 const bool is_invokespecial = code == Bytecodes::_invokespecial; 3180 const bool load_receiver = (Rrecv != noreg); 3181 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 3182 3183 assert_different_registers(Rmethod, Rindex, Rflags, Rscratch); 3184 assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch); 3185 assert_different_registers(Rret_addr, Rscratch); 3186 3187 load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic); 3188 3189 // Saving of SP done in call_from_interpreter. 3190 3191 // Maybe push "appendix" to arguments. 3192 if (is_invokedynamic || is_invokehandle) { 3193 Label Ldone; 3194 __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63); 3195 __ beq(CCR0, Ldone); 3196 // Push "appendix" (MethodType, CallSite, etc.). 3197 // This must be done before we get the receiver, 3198 // since the parameter_size includes it. 3199 __ load_resolved_reference_at_index(Rscratch, Rindex); 3200 __ verify_oop(Rscratch); 3201 __ push_ptr(Rscratch); 3202 __ bind(Ldone); 3203 } 3204 3205 // Load receiver if needed (after appendix is pushed so parameter size is correct). 3206 if (load_receiver) { 3207 const Register Rparam_count = Rscratch; 3208 __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask); 3209 __ load_receiver(Rparam_count, Rrecv); 3210 __ verify_oop(Rrecv); 3211 } 3212 3213 // Get return address. 3214 { 3215 Register Rtable_addr = Rscratch; 3216 Register Rret_type = Rret_addr; 3217 address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); 3218 3219 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3220 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3221 __ load_dispatch_table(Rtable_addr, (address*)table_addr); 3222 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3223 // Get return address. 3224 __ ldx(Rret_addr, Rtable_addr, Rret_type); 3225 } 3226 } 3227 3228 // Helper for virtual calls. Load target out of vtable and jump off! 3229 // Kills all passed registers. 3230 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) { 3231 3232 assert_different_registers(Rrecv_klass, Rtemp, Rret); 3233 const Register Rtarget_method = Rindex; 3234 3235 // Get target method & entry point. 3236 const int base = InstanceKlass::vtable_start_offset() * wordSize; 3237 // Calc vtable addr scale the vtable index by 8. 3238 __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize)); 3239 // Load target. 3240 __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes()); 3241 __ ldx(Rtarget_method, Rindex, Rrecv_klass); 3242 __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */); 3243 } 3244 3245 // Virtual or final call. Final calls are rewritten on the fly to run through "fast_finalcall" next time. 
void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);

  Register Rtable_addr = R11_scratch1,
           Rret_type = R12_scratch2,
           Rret_addr = R5_ARG3,
           Rflags = R22_tmp2,                // Should survive C call.
           Rrecv = R3_ARG1,
           Rrecv_klass = Rrecv,
           Rvtableindex_or_method = R31,     // Should survive C call.
           Rnum_params = R4_ARG2,
           Rnew_bc = R6_ARG4;

  Label LnotFinal;

  load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
  invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);

  __ align(32, 12);
  __ bind(LnotFinal);
  // Load "this" pointer (receiver).
  __ rldicl(Rnum_params, Rflags, 64, 48);
  __ load_receiver(Rnum_params, Rrecv);
  __ verify_oop(Rrecv);

  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);
  __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
  __ load_klass(Rrecv_klass, Rrecv);
  __ verify_klass_ptr(Rrecv_klass);
  __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);

  generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f2_byte, "use this argument");
  Register Rflags  = R22_tmp2,
           Rmethod = R31;
  load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
  invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {

  assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);

  // Load receiver from stack slot.
  Register Rrecv = Rscratch2;
  Register Rnum_params = Rrecv;

  __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
  __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);

  // Get return address.
  Register Rtable_addr = Rscratch1,
           Rret_addr   = Rflags,
           Rret_type   = Rret_addr;
  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);

  // Load receiver and receiver NULL check.
  __ load_receiver(Rnum_params, Rrecv);
  __ null_check_throw(Rrecv, -1, Rscratch1);

  __ profile_final_call(Rrecv, Rscratch1);

  // Do the call.
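  // Note: Rret_addr was picked from the TOS-state-specific invoke return entry
  // table above, so when the callee returns, the interpreter resumes at an
  // entry that expects the result in the matching tos register.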
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
}

void TemplateTable::invokespecial(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr   = R4_ARG2,
           Rflags      = R5_ARG3,
           Rreceiver   = R6_ARG4,
           Rmethod     = R31;

  prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);

  // Receiver NULL check.
  __ null_check_throw(Rreceiver, -1, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokestatic(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr   = R4_ARG2,
           Rflags      = R5_ARG3;

  prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
                                                  Register Rret,
                                                  Register Rflags,
                                                  Register Rindex,
                                                  Register Rtemp1,
                                                  Register Rtemp2) {

  assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
  Label LnotFinal;

  // Check for vfinal.
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  Register Rscratch = Rflags; // Rflags is dead now.

  // Final call case.
  __ profile_final_call(Rtemp1, Rscratch);
  // Do the final call - the index (f2) contains the method.
  __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);

  // Non-final call case.
  __ bind(LnotFinal);
  __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
  generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
}

void TemplateTable::invokeinterface(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  const Register Rscratch1        = R11_scratch1,
                 Rscratch2        = R12_scratch2,
                 Rscratch3        = R9_ARG7,
                 Rscratch4        = R10_ARG8,
                 Rtable_addr      = Rscratch2,
                 Rinterface_klass = R5_ARG3,
                 Rret_type        = R8_ARG6,
                 Rret_addr        = Rret_type,
                 Rindex           = R6_ARG4,
                 Rreceiver        = R4_ARG2,
                 Rrecv_klass      = Rreceiver,
                 Rflags           = R7_ARG5;

  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);

  // Get receiver klass.
  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
  __ load_klass(Rrecv_klass, Rreceiver);

  // Check corner case object method.
  Label LobjectMethod;

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  __ btrue(CCR0, LobjectMethod);

  // Fallthrough: The normal invokeinterface case.
  __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);

  // Find entry point to call.
  Label Lthrow_icc, Lthrow_ame;
  // Result will be returned in Rindex.
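  // lookup_interface_method may clobber Rrecv_klass and Rindex, so copies are
  // saved first; the exception paths below restore them before calling into
  // the VM, so error reporting sees the original values.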
  __ mr(Rscratch4, Rrecv_klass);
  __ mr(Rscratch3, Rindex);
  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);

  __ cmpdi(CCR0, Rindex, 0);
  __ beq(CCR0, Lthrow_ame);
  // Found entry. Jump off!
  __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);

  // Vtable entry was NULL => Throw abstract method error.
  __ bind(Lthrow_ame);
  __ mr(Rrecv_klass, Rscratch4);
  __ mr(Rindex, Rscratch3);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  // Interface was not found => Throw incompatible class change error.
  __ bind(Lthrow_icc);
  __ mr(Rrecv_klass, Rscratch4);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));

  __ should_not_reach_here();

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
  // The invokeinterface was rewritten to an invokevirtual, hence we have
  // to handle this corner case. This code isn't produced by javac, but could
  // be produced by another compliant java compiler.
  __ bind(LobjectMethod);
  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags    = R4_ARG2,
                 Rmethod   = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);

  // Profile this call.
  __ profile_call(Rscratch1, Rscratch2);

  // Off we go. With the new method handles, we don't jump to a method handle
  // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
  // to be the callsite object the bootstrap method returned. This is passed to a
  // "link" method which does the dispatch (most likely just grabs the MH stored
  // inside the callsite and does an invokehandle).
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags    = R4_ARG2,
                 Rrecv     = R5_ARG3,
                 Rmethod   = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
  __ verify_method_ptr(Rmethod);
  __ null_check_throw(Rrecv, -1, Rscratch2);

  __ profile_final_call(Rrecv, Rscratch1);

  // Still no call from handle => We call the method handle interpreter here.
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

// =============================================================================
// Allocation

// Puts allocated obj ref onto the expression stack.
void TemplateTable::_new() {
  transition(vtos, atos);

  Label Lslow_case,
        Ldone,
        Linitialize_header,
        Lallocate_shared,
        Linitialize_object; // Including clearing the fields.
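  // Allocation strategy (see the labels above): try a TLAB bump-pointer
  // allocation first; if the TLAB is too small, either keep it and take the
  // slow path, or let the slow path discard and refill it. The slow path calls
  // InterpreterRuntime::_new. The fast path then clears the fields (unless
  // ZeroTLAB already did) and initializes the mark word and klass.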

  const Register RallocatedObject = R17_tos,
                 RinstanceKlass   = R9_ARG7,
                 Rscratch         = R11_scratch1,
                 Roffset          = R8_ARG6,
                 Rinstance_size   = Roffset,
                 Rcpool           = R4_ARG2,
                 Rtags            = R3_ARG1,
                 Rindex           = R5_ARG3;

  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;

  // --------------------------------------------------------------------------
  // Check if fast case is possible.

  // Load pointers to const pool and const pool's tags array.
  __ get_cpool_and_tags(Rcpool, Rtags);
  // Load index of constant pool entry.
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  if (UseTLAB) {
    // Make sure the class we're about to instantiate has been resolved.
    // This is done before loading the InstanceKlass to be consistent with the
    // order in which the ConstantPool is updated (see ConstantPoolCache::klass_at_put).
    __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
    __ lbzx(Rtags, Rindex, Rtags);

    __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
    __ bne(CCR0, Lslow_case);

    // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
    __ sldi(Roffset, Rindex, LogBytesPerWord);
    __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
    __ isync(); // Order load of instance Klass wrt. tags.
    __ ldx(RinstanceKlass, Roffset, Rscratch);

    // Make sure klass is fully initialized and get instance_size.
    __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
    __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);

    __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
    // Make sure klass has no finalizer and is not abstract, an interface, or java/lang/Class.
    __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?

    __ crnand(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // slow path bit set or not fully initialized?
    __ beq(CCR0, Lslow_case);

    // --------------------------------------------------------------------------
    // Fast case:
    // Allocate the instance.
    // 1) Try to allocate in the TLAB.
    // 2) If that fails and the TLAB is not yet full enough to discard, allocate in the shared eden.
    // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).

    Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
    Register RnewTopValue = R6_ARG4;
    Register RendValue    = R7_ARG5;

    // Check if we can allocate in the TLAB.
    __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
    __ ld(RendValue,    in_bytes(JavaThread::tlab_end_offset()), R16_thread);

    __ add(RnewTopValue, Rinstance_size, RoldTopValue);

    // If there is enough space, we do not CAS and do not clear.
    __ cmpld(CCR0, RnewTopValue, RendValue);
    __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);

    __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);

    if (ZeroTLAB) {
      // The fields have already been cleared.
      __ b(Linitialize_header);
    } else {
      // Initialize both the header and fields.
      __ b(Linitialize_object);
    }

    // Fall through: TLAB was too small.
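    // Note: no inline CAS loop on the shared eden top is generated here; both
    // outcomes of the waste-limit check below end up calling the runtime. The
    // check only decides whether the TLAB may be discarded (refill_waste_limit
    // >= free) or must be kept, in which case the limit is bumped so a large
    // TLAB cannot keep us on this path forever.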
    if (allow_shared_alloc) {
      Register RtlabWasteLimitValue = R10_ARG8;
      Register RfreeValue           = RnewTopValue;

      __ bind(Lallocate_shared);
      // Check if tlab should be discarded (refill_waste_limit >= free).
      __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
      __ subf(RfreeValue, RoldTopValue, RendValue);
      __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
      __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
      __ bge(CCR0, Lslow_case);

      // Increment waste limit to prevent getting stuck on this slow path.
      __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
      __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
    }
    // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
  }
  // else: Always go the slow path.

  // --------------------------------------------------------------------------
  // slow case
  __ bind(Lslow_case);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);

  if (UseTLAB) {
    __ b(Ldone);
    // --------------------------------------------------------------------------
    // Init1: Zero out newly allocated memory.

    if (!ZeroTLAB || allow_shared_alloc) {
      // Clear object fields.
      __ bind(Linitialize_object);

      // Initialize remaining object fields.
      Register Rbase = Rtags;
      __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
      __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
      __ srdi(Rinstance_size, Rinstance_size, 3);

      // Clear out object skipping header. Also takes care of the zero-length case.
      __ clear_memory_doubleword(Rbase, Rinstance_size);
      // fallthru: __ b(Linitialize_header);
    }

    // --------------------------------------------------------------------------
    // Init2: Initialize the header: mark, klass
    __ bind(Linitialize_header);

    // Init mark.
    if (UseBiasedLocking) {
      __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
    } else {
      __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
    }
    __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);

    // Init klass.
    __ store_klass_gap(RallocatedObject);
    __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)

    // Check and trigger dtrace event.
    {
      SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
      __ push(atos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
      __ pop(atos);
    }
  }

  // continue
  __ bind(Ldone);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::newarray() {
  transition(itos, atos);

  __ lbz(R4, 1, R14_bcp);
  __ extsw(R5, R17_tos);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
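  // (A StoreStore barrier is sufficient here: publishing the reference is
  // itself a store, and only store-store ordering is needed between the
  // initializing stores and that publication.)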
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  __ get_constant_pool(R4);
  __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
  __ extsw(R6, R17_tos); // size
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

// Allocate a multi dimensional array
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register Rptr = R31; // Needs to survive C call.

  // Load ndims and compute ndims * wordSize (the size of the dimensions array on the stack).
  __ lbz(Rptr, 3, R14_bcp);
  __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
  // Esp points past last_dim, so set R4 to the first_dim address.
  __ add(R4, Rptr, R15_esp);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
  // Pop all dimensions off the stack.
  __ add(R15_esp, Rptr, R15_esp);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);

  Label LnoException;
  __ verify_oop(R17_tos);
  __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
  __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
}

// ============================================================================
// Typechecks

void TemplateTable::checkcast() {
  transition(atos, atos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset         = R6_ARG4,
           RobjKlass       = R4_ARG2,
           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
           Rcpool          = R11_scratch1,
           Rtags           = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" instanceof.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the checkcast.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone if the cast succeeds; falls through on failure.
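  // The fast check consults the super type display and the cached secondary
  // super before falling back to a scan of the secondary supers array (see
  // gen_subtype_check in the interpreter macro assembler), so the common case
  // stays short.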
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);

  // Not a subtype; so must throw an exception.
  // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}

// Output:
//   - tos == 0: Obj was null or not an instance of class.
//   - tos == 1: Obj was an instance of class.
void TemplateTable::instanceof() {
  transition(atos, itos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset         = R5_ARG3,
           RobjKlass       = R4_ARG2,
           RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect the value in this register.
           Rcpool          = R11_scratch1,
           Rtags           = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" instanceof.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the instanceof check.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone with tos == 1 if R17_tos is
  // an instance of the class; falls through and leaves tos == 0 otherwise.
  __ li(R17_tos, 1);
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
  __ li(R17_tos, 0);

  if (ProfileInterpreter) {
    __ b(Ldone);
  }

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}

// =============================================================================
// Breakpoints

void TemplateTable::_breakpoint() {
  transition(vtos, vtos);

  // Get the unpatched byte code.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
  __ mr(R31, R3_RET);

  // Post the breakpoint event.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);

  // Complete the execution of original bytecode.
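  // R31 still holds the original (unpatched) bytecode from the first VM call
  // above; dispatching it through the normal table executes it as if the
  // breakpoint had never been patched in.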
  __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
}

// =============================================================================
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // Exception oop is in tos.
  __ verify_oop(R17_tos);

  __ null_check_throw(R17_tos, -1, R11_scratch1);

  // Throw exception interpreter entry expects exception oop to be in R3.
  __ mr(R3_RET, R17_tos);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
  __ mtctr(R11_scratch1);
  __ bctr();
}

// =============================================================================
// Synchronization
// Searches the basic object lock list on the stack for a free slot
// and uses it to lock the object in tos.
//
// Recursive locking is enabled by exiting the search as soon as the same
// object is found in the list. Thus, a new BasicObjectLock is allocated
// "higher up" in the stack and is found first at the next monitor exit.
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  __ verify_oop(R17_tos);

  Register Rcurrent_monitor  = R11_scratch1,
           Rcurrent_obj      = R12_scratch2,
           Robj_to_lock      = R17_tos,
           Rscratch1         = R3_ARG1,
           Rscratch2         = R4_ARG2,
           Rscratch3         = R5_ARG3,
           Rcurrent_obj_addr = R6_ARG4;

  // ------------------------------------------------------------------------------
  // Null pointer exception.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until monitorenter returns true).

  // ------------------------------------------------------------------------------
  // Find a free slot in the monitor block.
  Label Lfound, Lexit, Lallocate_new;
  ConditionRegister found_free_slot = CCR0,
                    found_same_obj  = CCR1,
                    reached_limit   = CCR6;
  {
    Label Lloop, Lentry;
    Register Rlimit = Rcurrent_monitor;

    // Set up search loop - start with topmost monitor.
    __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);

    __ ld(Rlimit, 0, R1_SP);
    __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base

    // Check if any slot is present => shortcut to allocation if not.
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ bgt(reached_limit, Lallocate_new);

    // Pre-load topmost slot.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // The search loop.
    __ bind(Lloop);
    // Found free slot?
    __ cmpdi(found_free_slot, Rcurrent_obj, 0);
    // Is this entry for the same obj? If so, stop the search and take the found
    // free slot or allocate a new one to enable recursive locking.
    __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ beq(found_free_slot, Lexit);
    __ beq(found_same_obj, Lallocate_new);
    __ bgt(reached_limit, Lallocate_new);
    // Check if last allocated BasicObjectLock reached.
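    // The slots are scanned from the newest monitor (R26_monitor) towards the
    // monitor base (Rlimit); once Rcurrent_obj_addr passes Rlimit, every
    // allocated BasicObjectLock has been checked.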
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ b(Lloop);
  }

  // ------------------------------------------------------------------------------
  // Check if we found a free slot.
  __ bind(Lexit);

  __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
  __ b(Lfound);

  // We didn't find a free BasicObjectLock => allocate one.
  __ align(32, 12);
  __ bind(Lallocate_new);
  __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
  __ mr(Rcurrent_monitor, R26_monitor);
  __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());

  // ------------------------------------------------------------------------------
  // We now have a slot to lock.
  __ bind(Lfound);

  // Increment bcp to point to the next bytecode, so exception handling for async.
  // exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ addi(R14_bcp, R14_bcp, 1);

  __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
  __ lock_object(Rcurrent_monitor, Robj_to_lock);

  // Check if there's enough space on the stack for the monitors after locking.
  Label Lskip_stack_check;
  // Optimization: If the monitors stack section is less than a standard page size (4K),
  // don't run the stack check. There should be enough shadow pages to fit that in.
  __ ld(Rscratch3, 0, R1_SP);
  __ sub(Rscratch3, Rscratch3, R26_monitor);
  __ cmpdi(CCR0, Rscratch3, 4*K);
  __ blt(CCR0, Lskip_stack_check);

  DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
  __ li(Rscratch1, 0);
  __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);

  __ align(32, 12);
  __ bind(Lskip_stack_check);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}

void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(R17_tos);

  Register Rcurrent_monitor  = R11_scratch1,
           Rcurrent_obj      = R12_scratch2,
           Robj_to_lock      = R17_tos,
           Rcurrent_obj_addr = R3_ARG1,
           Rlimit            = R4_ARG2;
  Label Lfound, Lillegal_monitor_state;

  // Check corner case: unbalanced monitorEnter / Exit.
  __ ld(Rlimit, 0, R1_SP);
  __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base

  // Null pointer check.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  __ cmpld(CCR0, R26_monitor, Rlimit);
  __ bgt(CCR0, Lillegal_monitor_state);

  // Find the corresponding slot in the monitors stack section.
  {
    Label Lloop;

    // Start with topmost monitor.
    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
    __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    __ bind(Lloop);
    // Is this entry for the same obj?
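    // Unlike the search in monitorenter, only an exact object match counts
    // here; null (free) slots are simply skipped, since the object must have
    // been locked before.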
    __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
    __ beq(CCR0, Lfound);

    // Check if last allocated BasicObjectLock reached.

    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ ble(CCR0, Lloop);
  }

  // Fell through without finding the basic obj lock => throw up!
  __ bind(Lillegal_monitor_state);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  __ align(32, 12);
  __ bind(Lfound);
  __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
          -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ unlock_object(Rcurrent_monitor);
}

// ============================================================================
// Wide bytecodes

// Wide instructions. Simply redirects to the wide entry point for that instruction.
void TemplateTable::wide() {
  transition(vtos, vtos);

  const Register Rtable = R11_scratch1,
                 Rindex = R12_scratch2,
                 Rtmp   = R0;

  __ lbz(Rindex, 1, R14_bcp);

  __ load_dispatch_table(Rtable, Interpreter::_wentry_point);

  __ slwi(Rindex, Rindex, LogBytesPerWord);
  __ ldx(Rtmp, Rtable, Rindex);
  __ mtctr(Rtmp);
  __ bctr();
  // Note: the bcp increment step is part of the individual wide bytecode implementations.
}
#endif // !CC_INTERP