/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of both variants is possible at the same time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,   // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
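          // Storing null: the post barrier / card mark is skipped, since a
          // null store creates no inter-region reference to track.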
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
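      // (The cache entry's indices field holds the constant pool index in its
      // low 16 bits and the resolved get/put bytecodes in the bytes above it;
      // byte_no selects which of those bytes to test, see the lbz below.)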
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // Big Endian: ((*(cache+indices))>>((1+byte_no)*8))&0xFF
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}
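// Push a 16-bit immediate from the bytecode stream, sign extended.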
void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass);        // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ isync(); // Order load of constant wrt. tags.
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ isync(); // Order load of constant wrt. tags.
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch);
  __ cmpdi(CCR0, R17_tos, 0);
  __ bne(CCR0, resolved);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
  __ call_VM(R17_tos, entry, R3_ARG1);

  __ align(32, 12);
  __ bind(resolved);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
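  // Constant pool entries start after the ConstantPool header; tags is a
  // parallel byte array holding one JVM_CONSTANT_* tag per entry.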
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ isync(); // Order load of constant wrt. tags.
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ isync(); // Order load of constant wrt. tags.
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  transition(vtos, itos);

  // Get the local value into tos.
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload pair into fast_iload2,
  // iload,caload pair into fast_icaload.
  if (RewriteFrequentPairs) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // If _iload, wait to rewrite to fast_iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching.
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable of type long from the locals area to the TOS cache register.
// The local index resides in the bytecode stream.
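// (A long occupies two slots and is addressed at the higher slot, index + 1;
// compare lload(int n) below.)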
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because Lbcp points to wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
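  // index_check range-checks the index against the array length and leaves
  // the element's effective address in Rload_addr.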
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.
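  // (A fused pair bytecode saves one bytecode dispatch and one operand-stack
  // round trip for the receiver.)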

  if (RewriteFrequentPairs) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
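  // (astore may legally store a returnAddress produced by jsr, hence
  // verify_oop_or_return_address instead of verify_oop.)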
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack (value, index, array) and store the value into the array.
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31; // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
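  // A null can be stored into any object array without a dynamic type check.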
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);      // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);  // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize, R15_esp); // store b in c
  __ push_ptr(Rc);                                    // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // For the shift operations:
  //   tos      = shift count (only the low 5 bits are used, per the JVM spec)
  //   Rscratch = value to shift
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:  __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}
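// Long two-operand bytecodes: ladd, lsub, land, lor, lxor
// (lmul, ldiv and lrem are handled separately below).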
void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  // Divisor in {-1, 0, 1}? Then (divisor + 1) is unsigned <= 2.
  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide min_jint/-1 (handled by the +/-1 path above).
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  // rem = dividend - (dividend / divisor) * divisor
  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  // Divisor in {-1, 0, 1}? Then (divisor + 1) is unsigned <= 2.
  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide min_jlong/-1 (handled by the +/-1 path above).
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  // rem = dividend - (dividend / divisor) * divisor
  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}
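// Logical (zero-filling) right shift; as in lshl/lshr, only the low 6 bits
// of the shift count are used.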
void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);             // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp);   // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement); // The increment is a signed byte.

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default: ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default: ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8); // Zero-extend the low 16 bits.
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
      // fall through: finished by the long-to-double conversion below
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in R17_tos.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    //__ unimplemented("branch invocation counter");

    Label Lforward;
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ load_const_optimized(Rscratch3, mask, R0);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
        __ b(Loverflow);
      }

      // If there's no MDO, increment counter in method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ load_const_optimized(Rscratch3, mask, R0);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters); // Store back to the MethodCounters object we loaded from.
      __ and_(Rscratch3, Rscratch2, Rscratch3);
      __ bne(CCR0, Lforward);

      __ bind(Loverflow);

      // Notify point for loop, pass branch bytecode.
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);
      // Was an OSR adapter generated?
      // R3_RET = osr nmethod
      __ cmpdi(CCR0, R3_RET, 0);
      __ beq(CCR0, Lforward);

      // Has the nmethod been invalidated already?
      __ lwz(R0, nmethod::entry_bci_offset(), R3_RET);
      __ cmpwi(CCR0, R0, InvalidOSREntryBci);
      __ beq(CCR0, Lforward);

      // Migrate the interpreter frame off the stack.
      // We can use all registers because we will not return to interpreter from this point.

      // Save nmethod.
      const Register osr_nmethod = R31;
      __ mr(osr_nmethod, R3_RET);
      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
      __ reset_last_Java_frame();
      // OSR buffer is in ARG1.

      // Remove the interpreter frame.
      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

      // Jump to the osr code.
      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
      __ mtlr(R0);
      __ mtctr(R11_scratch1);
      __ bctr();

    } else {

      const Register invoke_ctr = Rscratch1;
      // Update the backedge counter separately from the invocation counter.
      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);

      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
        }
      }
    }

    __ bind(Lforward);

  } else {
    // Bump bytecode pointer by displacement (take the branch).
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
  }
  // Continue with bytecode @ target.
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only.
  __ dispatch_next(vtos);
}

// Helper function for if_cmp* methods below.
// Factored out common compare and branch code.
void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
  Label Lnot_taken;
  // Note: cc is the condition under which the bytecode branch is taken.
  // We branch to Lnot_taken on the inverted condition and fall through
  // into the taken-branch code below.

  if (is_jint) {
    if (cmp0) {
      __ cmpwi(CCR0, Rfirst, 0);
    } else {
      __ cmpw(CCR0, Rfirst, Rsecond);
    }
  } else {
    if (cmp0) {
      __ cmpdi(CCR0, Rfirst, 0);
    } else {
      __ cmpd(CCR0, Rfirst, Rsecond);
    }
  }
  branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);

  // Condition is true => Take the branch!
  branch(false, false);

  // Condition is false => Continue with the next bytecode.
  __ align(32, 12);
  __ bind(Lnot_taken);
  __ profile_not_taken_branch(Rscratch1, Rscratch2);
}

// Compare the integer value with zero; branch away if CC holds, fall through otherwise.
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
}
// Compare integer values and fall through if CC holds, branch away otherwise.
//
// Interface:
//  - Rfirst: First operand  (older stack value)
//  - tos:    Second operand (younger stack value)
void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_i(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_ptr(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
}

void TemplateTable::ret() {
  locals_index(R11_scratch1);
  __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);

  __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);

  __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
  __ add(R11_scratch1, R17_tos, R11_scratch1);
  __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);

  const Register Rindex    = R3_ARG1,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, R17_tos, Rindex);
  __ profile_ret(vtos, R17_tos, Rscratch1, Rscratch2);
  // Tos now contains the bci, compute the bcp from that.
  __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
  __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
  __ add(R14_bcp, Rscratch1, Rscratch2);
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch() {
  transition(itos, vtos);

  Label Ldispatch, Ldefault_case;
  Register Rlow_byte        = R3_ARG1,
           Rindex           = Rlow_byte,
           Rhigh_byte       = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset
           Rscratch1        = R11_scratch1,
           Rscratch2        = R12_scratch2,
           Roffset          = R6_ARG4;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Load lo & hi.
  __ lwz(Rlow_byte, BytesPerInt, Rdef_offset_addr);
  __ lwz(Rhigh_byte, BytesPerInt * 2, Rdef_offset_addr);

  // Check for default case (=index outside [low,high]).
  __ cmpw(CCR0, R17_tos, Rlow_byte);
  __ cmpw(CCR1, R17_tos, Rhigh_byte);
  __ blt(CCR0, Ldefault_case);
  __ bgt(CCR1, Ldefault_case);
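  // What the in-range path below computes, as a C-like sketch (aligned_bcp is
  // the 4-byte-aligned address in Rdef_offset_addr; layout per the JVM spec:
  // default (4) | low (4) | high (4) | jump offsets...):
  //
  //   if (key < low || key > high) offset = *(int*)aligned_bcp;                    // default
  //   else                         offset = *(int*)(aligned_bcp + (3 + key - low) * 4);
  //   bcp += offset;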
  // Lookup dispatch offset.
  __ sub(Rindex, R17_tos, Rlow_byte);
  __ extsw(Rindex, Rindex);
  __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
  __ sldi(Rindex, Rindex, LogBytesPerInt);
  __ addi(Rindex, Rindex, 3 * BytesPerInt);
  __ lwax(Roffset, Rdef_offset_addr, Rindex);
  __ b(Ldispatch);

  __ bind(Ldefault_case);
  __ profile_switch_default(Rhigh_byte, Rscratch1);
  __ lwa(Roffset, 0, Rdef_offset_addr);

  __ bind(Ldispatch);

  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

// Table switch using linear search through cases.
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is in big-endian format here. So on little-endian machines, we have to reverse the offset, the count and the compare value.
void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);

  Label Lloop_entry, Lsearch_loop, Lfound, Lcontinue_execution, Ldefault_case;

  Register Rcount           = R3_ARG1,
           Rcurrent_pair    = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
           Roffset          = R31,     // Might need to survive C call.
           Rvalue           = R12_scratch2,
           Rscratch         = R11_scratch1,
           Rcmp_value       = R17_tos;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Setup loop counter and limit.
  __ lwz(Rcount, BytesPerInt, Rdef_offset_addr);             // Load count.
  __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.

  // Set up search loop.
  __ cmpwi(CCR0, Rcount, 0);
  __ beq(CCR0, Ldefault_case);

  __ mtctr(Rcount);

  // linear table search
  __ bind(Lsearch_loop);

  __ lwz(Rvalue, 0, Rcurrent_pair);
  __ lwa(Roffset, 1 * BytesPerInt, Rcurrent_pair);

  __ cmpw(CCR0, Rvalue, Rcmp_value);
  __ beq(CCR0, Lfound);

  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
  __ bdnz(Lsearch_loop);

  // default case
  __ bind(Ldefault_case);

  __ lwa(Roffset, 0, Rdef_offset_addr);
  if (ProfileInterpreter) {
    __ profile_switch_default(Rdef_offset_addr, Rcount /* scratch */);
    __ b(Lcontinue_execution);
  }

  // Entry found, skip Roffset bytecodes and continue.
  __ bind(Lfound);
  if (ProfileInterpreter) {
    // Calc the num of the pair we hit. Rcurrent_pair still points at the
    // matching pair; subtract the 2-int header (default offset and count)
    // that precedes the first pair, then divide by the pair size.
    __ sub(Rcurrent_pair, Rcurrent_pair, Rdef_offset_addr);
    __ addi(Rcurrent_pair, Rcurrent_pair, -2 * BytesPerInt);
    __ srdi(Rcurrent_pair, Rcurrent_pair, LogBytesPerInt + 1);
    __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr /*scratch*/, Rscratch);
    __ bind(Lcontinue_execution);
  }
  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos);
}
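// A worked example of the pair stream searched above (values are illustrative;
// layout per the JVM spec, big-endian, addresses relative to the aligned slot
// after the opcode). For "lookupswitch { 10 => +20, 50 => +36, default => +44 }":
//
//   +0  : 0x0000002C   // default offset (44)
//   +4  : 0x00000002   // count (2 pairs)
//   +8  : 0x0000000A   // match value 10
//   +12 : 0x00000014   // offset +20
//   +16 : 0x00000032   // match value 50
//   +20 : 0x00000024   // offset +36
//
// All offsets are relative to the address of the lookupswitch opcode itself.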
// Table switch using binary search (value/offset pairs are ordered).
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is in big-endian format here. So on little-endian machines, we have to reverse the offset, the count and the compare value.
void TemplateTable::fast_binaryswitch() {

  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (inexisting)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  const Register Rkey     = R17_tos; // already set (tosca)
  const Register Rarray   = R3_ARG1;
  const Register Ri       = R4_ARG2;
  const Register Rj       = R5_ARG3;
  const Register Rh       = R6_ARG4;
  const Register Rscratch = R11_scratch1;

  const int log_entry_size = 3;
  const int entry_size     = 1 << log_entry_size;

  Label found;

  // Find array start.
  __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
  __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));

  // Initialize i & j.
  __ li(Ri, 0);
  __ lwz(Rj, -BytesPerInt, Rarray);

  // And start.
  Label entry;
  __ b(entry);

  // binary search loop
  { Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ srdi(Rh, Rh, 1);
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sldi(Rscratch, Rh, log_entry_size);
    __ lwzx(Rscratch, Rscratch, Rarray);

    // if (key < current value)
    //   Rj = Rh
    // else
    //   Ri = Rh
    Label Lgreater;
    __ cmpw(CCR0, Rkey, Rscratch);
    __ bge(CCR0, Lgreater);
    __ mr(Rj, Rh);
    __ b(entry);
    __ bind(Lgreater);
    __ mr(Ri, Rh);

    // while (i+1 < j)
    __ bind(entry);
    __ addi(Rscratch, Ri, 1);
    __ cmpw(CCR0, Rscratch, Rj);
    __ add(Rh, Ri, Rj); // start h = (i + j) >> 1;

    __ blt(CCR0, loop);
  }

  // End of binary search, result index is i (must check again!).
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mr(Rh, Ri); // Save index i for profiling.
  }
  // Ri = value offset
  __ sldi(Ri, Ri, log_entry_size);
  __ add(Ri, Ri, Rarray);
  __ lwz(Rscratch, 0, Ri);

  // Ri = offset offset
  __ cmpw(CCR0, Rkey, Rscratch);
  __ beq(CCR0, found);
  // entry not found -> j = default offset
  __ lwz(Rj, -2 * BytesPerInt, Rarray);
  __ b(default_case);

  __ bind(found);
  // entry found -> j = offset
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ lwz(Rj, BytesPerInt, Ri);

  if (ProfileInterpreter) {
    __ b(continue_execution);
  }

  __ bind(default_case); // fall through (if not profiling)
  __ profile_switch_default(Ri, Rscratch);

  __ bind(continue_execution);

  __ extsw(Rj, Rj);
  __ add(R14_bcp, Rj, R14_bcp);
  __ dispatch_next(vtos);
}

void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {

    Register Rscratch     = R11_scratch1,
             Rklass       = R12_scratch2,
             Rklass_flags = Rklass;
    Label Lskip_register_finalizer;

    // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
    assert(state == vtos, "only valid state");
    __ ld(R17_tos, 0, R18_locals);

    // Load klass of this obj.
    __ load_klass(Rklass, R17_tos);
    __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
    __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
    __ bfalse(CCR0, Lskip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);

    __ align(32, 12);
    __ bind(Lskip_register_finalizer);
  }

  // Move the result value into the correct register and remove the memory stack frame.
  __ remove_activation(state, /* throw_monitor_exception */ true);
  // Restoration of lr done by remove_activation.
  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R3_RET, R17_tos); break;
    case ftos:
    case dtos: __ fmr(F1_RET, F15_ftos); break;
    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
               // to get visible before the reference to the object gets stored anywhere.
               __ membar(Assembler::StoreStore); break;
    default  : ShouldNotReachHere();
  }
  __ blr();
}

// ============================================================================
// Constant pool cache access
//
// Memory ordering:
//
// As done in the C++ interpreter, we load the fields
//   - _indices
//   - _f12_oop
// with acquire semantics, because they are queried to check whether the cache
// entry is already resolved. We don't want loads to float above this check.
// See also comments in ConstantPoolCacheEntry::bytecode_1(),
// ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();

// Call into the VM if the call site is not yet resolved.
//
// Input regs:
//   - None, all passed regs are outputs.
//
// Returns:
//   - Rcache:  The const pool cache entry that contains the resolved result.
//   - Rresult: Either noreg or output for f1/f2.
//
// Kills:
//   - Rscratch
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {

  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  Label Lresolved, Ldone;

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  // We are resolved if the indices field contains the current bytecode.
  // Big Endian:
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
  // Acquire by cmp-br-isync (see below).
  __ cmpdi(CCR0, Rscratch, (int)bytecode());
  __ beq(CCR0, Lresolved);

  address entry = NULL;
  switch (bytecode()) {
    case Bytecodes::_getstatic      : // fall through
    case Bytecodes::_putstatic      : // fall through
    case Bytecodes::_getfield       : // fall through
    case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
    case Bytecodes::_invokevirtual  : // fall through
    case Bytecodes::_invokespecial  : // fall through
    case Bytecodes::_invokestatic   : // fall through
    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
    case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
    default                         : ShouldNotReachHere(); break;
  }
  __ li(R4_ARG2, (int)bytecode());
  __ call_VM(noreg, entry, R4_ARG2, true);

  // Update registers with resolved info.
  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  __ b(Ldone);

  __ bind(Lresolved);
  __ isync(); // Order load wrt. succeeding loads.
  __ bind(Ldone);
}

// Load the constant pool cache entry at field accesses into registers.
// The Rcache and Rindex registers must be set before call.
// Input:
//   - Rcache, Rindex
// Output:
//   - Robj, Roffset, Rflags
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register Rindex /* unused on PPC64 */,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static = false) {
  assert_different_registers(Rcache, Rflags, Roffset);
  // assert(Rindex == noreg, "parameter not used on PPC64");

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
  if (is_static) {
    __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
    __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
    // Acquire not needed here. Following access has an address dependency on this value.
  }
}
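// Putting the helpers above together, the resolution protocol in C-like
// pseudocode (field names follow ConstantPoolCacheEntry; the sketch is
// illustrative, not the VM's actual C++ path):
//
//   ConstantPoolCacheEntry* e = cache_entry_at_bcp();
//   if (e->indices.byte(byte_no + 1) == current_bytecode) { // already resolved?
//     acquire();                                  // cmp-br-isync; later loads see f1/f2/flags
//   } else {
//     InterpreterRuntime::resolve_*(current_bytecode);      // fills f1/f2/flags,
//     e = cache_entry_at_bcp();                             // then publishes indices
//   }
//   // now e->flags, e->f1, e->f2 may be read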
// Load the constant pool cache entry at invokes into registers.
// Resolve if necessary.

// Input Registers:
//   - None, bcp is used, though.
//
// Return registers:
//   - Rmethod       (f1 field or f2 if invokevirtual)
//   - Ritable_index (f2 field)
//   - Rflags        (flags field)
//
// Kills:
//   - R21
//
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register Rmethod,
                                               Register Ritable_index,
                                               Register Rflags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  // Determine constant pool cache field offsets.
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
  // Access constant pool cache fields.
  const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());

  Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.

  if (is_invokevfinal) {
    assert(Ritable_index == noreg, "register not used");
    // Already resolved.
    __ get_cache_and_index_at_bcp(Rcache, 1);
  } else {
    resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
  }

  __ ld(Rmethod, method_offset, Rcache);
  __ ld(Rflags, flags_offset, Rcache);

  if (Ritable_index != noreg) {
    __ ld(Ritable_index, index_offset, Rcache);
  }
}

// ============================================================================
// Field access

// Volatile variables demand their effects be made known to all CPUs
// in order. Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt. each other. ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.
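// The barrier placement the templates below implement, sketched per access
// kind (PPC64 instruction choices as used in this file; illustrative only):
//
//   volatile load :  sync before the load if support_IRIW_for_not_multiple_copy_atomic_cpu,
//                    then load; twi/cmp + br; isync       // acquire via cmp-br-isync
//   volatile store:  release (lwsync); store; and, if !support_IRIW..., a
//                    trailing sync to cover the volatile-store-volatile-load case
//   non-volatile  :  plain load/store, no barriers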
// The registers cache and index are expected to be set before the call.
// Correct values of the cache and index registers are preserved.
// Kills:
//   Rcache (if has_tos)
//   Rscratch
void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {

  assert_different_registers(Rcache, Rscratch);

  if (JvmtiExport::can_post_field_access()) {
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    Label Lno_field_access_post;

    // Check if post field access is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_access_post);

    // Post access enabled - do it!
    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      __ li(R17_tos, 0);
    } else {
      if (has_tos) {
        // The fast bytecode versions have obj ptr in register.
        // Thus, save the object pointer before call_VM() clobbers it:
        // put the object on tos where GC wants it.
        __ push_ptr(R17_tos);
      } else {
        // Load top of stack (do not pop the value off the stack).
        __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
      }
      __ verify_oop(R17_tos);
    }
    // tos:   object pointer or NULL if static
    // cache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
    if (!is_static && has_tos) {
      // Restore object pointer.
      __ pop_ptr(R17_tos);
      __ verify_oop(R17_tos);
    } else {
      // Cache is still needed to get class or obj.
      __ get_cache_and_index_at_bcp(Rcache, 1);
    }

    __ align(32, 12);
    __ bind(Lno_field_access_post);
  }
}

// kills R11_scratch1
void TemplateTable::pop_and_check_object(Register Roop) {
  Register Rtmp = R11_scratch1;

  assert_different_registers(Rtmp, Roop);
  __ pop_ptr(Roop);
  // For field access must check obj.
  __ null_check_throw(Roop, -1, Rtmp);
  __ verify_oop(Roop);
}

// PPC64: implement volatile loads as fence-load-acquire.
void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  transition(vtos, vtos);

  Label Lacquire, Lisync;

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R22_tmp2,
                 Roffset       = R23_tmp3,
                 Rflags        = R31,
                 Rbtable       = R5_ARG3,
                 Rbc           = R6_ARG4,
                 Rscratch      = R12_scratch2;

  static address field_branch_table[number_of_states],
                 static_branch_table[number_of_states];

  address* branch_table = is_static ? static_branch_table : field_branch_table;

  // Get field offset.
  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, is_static, false);

  // Load after possible GC.
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  // Note: sync is needed before volatile load on PPC64.
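  // How the branch-table dispatch below works, as a C-like sketch: every tos
  // state gets one entry in branch_table, recorded at code-generation time.
  // The volatile entry point of each accessor is exactly one instruction (a
  // sync/release) ahead of the non-volatile entry point, so it can be selected
  // arithmetically instead of with an extra branch (illustrative pseudocode):
  //
  //   address target = branch_table[tos_state];             // non-volatile entry
  //   if (need_leading_barrier) target -= BytesPerInstWord; // volatile entry
  //   goto *target;
  //
  // The get path uses the arithmetic selection only when
  // support_IRIW_for_not_multiple_copy_atomic_cpu is set (otherwise volatility
  // is handled by the trailing acquire via CCR6); the put path in
  // putfield_or_static below uses it unconditionally.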
  // Check field type.
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  __ cmpwi(CCR6, Rscratch, 1); // Volatile?
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  }
  __ ldx(Rbtable, Rbtable, Rflags);

  // Get the obj from stack.
  if (!is_static) {
    pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
  } else {
    __ verify_oop(Rclass_or_obj);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  }
  __ mtctr(Rbtable);
  __ bctr();

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x654);

  // __ bind(Lvtos);
  address pc_before_fence = __ pc();
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  assert(branch_table[vtos] == 0, "can't compute twice");
  branch_table[vtos] = __ pc(); // non-volatile_entry point
  __ stop("vtos unexpected", 0x655);
#endif

  __ align(32, 28, 28); // Align load.
  // __ bind(Ldtos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[dtos] == 0, "can't compute twice");
  branch_table[dtos] = __ pc(); // non-volatile_entry point
  __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
  __ push(dtos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
  {
    Label acquire_double;
    __ beq(CCR6, acquire_double); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ bind(acquire_double);
    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NaN.
  }

  __ align(32, 28, 28); // Align load.
  // __ bind(Lftos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ftos] == 0, "can't compute twice");
  branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
  __ push(ftos);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }
  {
    Label acquire_float;
    __ beq(CCR6, acquire_float); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ bind(acquire_float);
    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NaN.
  }

  __ align(32, 28, 28); // Align load.
  // __ bind(Litos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[itos] == 0, "can't compute twice");
  branch_table[itos] = __ pc(); // non-volatile_entry point
  __ lwax(R17_tos, Rclass_or_obj, Roffset);
  __ push(itos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lltos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ltos] == 0, "can't compute twice");
  branch_table[ltos] = __ pc(); // non-volatile_entry point
  __ ldx(R17_tos, Rclass_or_obj, Roffset);
  __ push(ltos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lbtos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[btos] == 0, "can't compute twice");
  branch_table[btos] = __ pc(); // non-volatile_entry point
  __ lbzx(R17_tos, Rclass_or_obj, Roffset);
  __ extsb(R17_tos, R17_tos);
  __ push(btos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lctos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ctos] == 0, "can't compute twice");
  branch_table[ctos] = __ pc(); // non-volatile_entry point
  __ lhzx(R17_tos, Rclass_or_obj, Roffset);
  __ push(ctos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lstos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[stos] == 0, "can't compute twice");
  branch_table[stos] = __ pc(); // non-volatile_entry point
  __ lhax(R17_tos, Rclass_or_obj, Roffset);
  __ push(stos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Latos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
  __ verify_oop(R17_tos);
  __ push(atos);
  //__ dcbt(R17_tos); // prefetch
  if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 12);
  __ bind(Lacquire);
  __ twi_0(R17_tos);
  __ bind(Lisync);
  __ isync(); // acquire

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "get initialization");
    //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

// The registers cache and index are expected to be set before the call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {

  assert_different_registers(Rcache, Rscratch, R6_ARG4);

  if (JvmtiExport::can_post_field_modification()) {
    Label Lno_field_mod_post;

    // Check if post field modification is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_mod_post);

    // Do the post
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    const Register Robj = Rscratch;

    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ li(Robj, 0);
    } else {
      // In case of the fast versions, value lives in registers => put it back on tos.
      int offs = Interpreter::expr_offset_in_bytes(0);
      Register base = R15_esp;
      switch (bytecode()) {
        case Bytecodes::_fast_aputfield: __ push_ptr(); offs +=   Interpreter::stackElementSize; break;
        case Bytecodes::_fast_iputfield: // Fall through
        case Bytecodes::_fast_bputfield: // Fall through
        case Bytecodes::_fast_cputfield: // Fall through
        case Bytecodes::_fast_sputfield: __ push_i();   offs +=   Interpreter::stackElementSize; break;
        case Bytecodes::_fast_lputfield: __ push_l();   offs += 2*Interpreter::stackElementSize; break;
        case Bytecodes::_fast_fputfield: __ push_f();   offs +=   Interpreter::stackElementSize; break;
        case Bytecodes::_fast_dputfield: __ push_d();   offs += 2*Interpreter::stackElementSize; break;
        default: {
          offs = 0;
          base = Robj;
          const Register Rflags = Robj;
          Label is_one_slot;
          // Life is harder. The stack holds the value on top, followed by the
          // object. We don't know the size of the value, though; it could be
          // one or two words depending on its type. As a result, we must find
          // the type to determine where the object is.
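          // The two possible expression-stack layouts at this point (sketch
          // only; slots counted from the top of the expression stack):
          //
          //   one-slot value (i/b/c/s/f/a):     two-slot value (l/d):
          //     slot 0: value                     slots 0..1: value
          //     slot 1: obj                       slot  2:    obj
          //
          // So the object lives at expr_offset_in_bytes(1) or (2), selected
          // below from the tos state in the flags word.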
          __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian
          __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

          __ cmpwi(CCR0, Rflags, ltos);
          __ cmpwi(CCR1, Rflags, dtos);
          __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
          __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
          __ beq(CCR0, is_one_slot);
          __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
          __ bind(is_one_slot);
          break;
        }
      }
      __ ld(Robj, offs, base);
      __ verify_oop(Robj);
    }

    __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0));
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4);
    __ get_cache_and_index_at_bcp(Rcache, 1);

    // In case of the fast versions, value lives in registers => put it back on tos.
    switch (bytecode()) {
      case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
      case Bytecodes::_fast_iputfield: // Fall through
      case Bytecodes::_fast_bputfield: // Fall through
      case Bytecodes::_fast_cputfield: // Fall through
      case Bytecodes::_fast_sputfield: __ pop_i(); break;
      case Bytecodes::_fast_lputfield: __ pop_l(); break;
      case Bytecodes::_fast_fputfield: __ pop_f(); break;
      case Bytecodes::_fast_dputfield: __ pop_d(); break;
      default: break; // Nothin' to do.
    }

    __ align(32, 12);
    __ bind(Lno_field_mod_post);
  }
}

// PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  Label Lvolatile;

  const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
                 Rclass_or_obj = R31,      // Needs to survive C call.
                 Roffset       = R22_tmp2, // Needs to survive C call.
                 Rflags        = R3_ARG1,
                 Rbtable       = R4_ARG2,
                 Rscratch      = R11_scratch1,
                 Rscratch2     = R12_scratch2,
                 Rscratch3     = R6_ARG4,
                 Rbc           = Rscratch3;
  const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).

  static address field_branch_table[number_of_states],
                 static_branch_table[number_of_states];

  address* branch_table = is_static ? static_branch_table : field_branch_table;

  // Stack (grows up):
  //   value
  //   obj

  // Load the field offset.
  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
  jvmti_post_field_mod(Rcache, Rscratch, is_static);
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.

  // Check the field type.
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile?
  __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  __ ldx(Rbtable, Rbtable, Rflags);

  __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  __ mtctr(Rbtable);
  __ bctr();

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x656);

  // __ bind(Lvtos);
  address pc_before_release = __ pc();
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  assert(branch_table[vtos] == 0, "can't compute twice");
  branch_table[vtos] = __ pc(); // non-volatile_entry point
  __ stop("vtos unexpected", 0x657);
#endif

  __ align(32, 28, 28); // Align pop.
  // __ bind(Ldtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[dtos] == 0, "can't compute twice");
  branch_table[dtos] = __ pc(); // non-volatile_entry point
  __ pop(dtos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lftos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ftos] == 0, "can't compute twice");
  branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ pop(ftos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Litos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[itos] == 0, "can't compute twice");
  branch_table[itos] = __ pc(); // non-volatile_entry point
  __ pop(itos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stwx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lltos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ltos] == 0, "can't compute twice");
  branch_table[ltos] = __ pc(); // non-volatile_entry point
  __ pop(ltos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stdx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lbtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[btos] == 0, "can't compute twice");
  branch_table[btos] = __ pc(); // non-volatile_entry point
  __ pop(btos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stbx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lctos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ctos] == 0, "can't compute twice");
  branch_table[ctos] = __ pc(); // non-volatile_entry point
  __ pop(ctos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lstos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[stos] == 0, "can't compute twice");
  branch_table[stos] = __ pc(); // non-volatile_entry point
  __ pop(stos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Latos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  __ pop(atos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ align(32, 12);
    __ bind(Lvolatile);
    __ fence();
  }
  // fallthru: __ b(Lexit);

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "put initialization");
    //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

// See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
void TemplateTable::jvmti_post_fast_field_mod() {
  __ should_not_reach_here();
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);

  const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
                 Rclass_or_obj = R31,      // Needs to survive C call.
                 Roffset       = R22_tmp2, // Needs to survive C call.
                 Rflags        = R3_ARG1,
                 Rscratch      = R11_scratch1,
                 Rscratch2     = R12_scratch2,
                 Rscratch3     = R4_ARG2;
  const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).

  // Constant pool already resolved => Load flags and offset of field.
  __ get_cache_and_index_at_bcp(Rcache, 1);
  jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // Get the obj and the final store addr.
  pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
  {
    Label LnotVolatile;
    __ beq(CCR0, LnotVolatile);
    __ release();
    __ align(32, 12);
    __ bind(LnotVolatile);
  }

  // Do the store and fencing.
  switch (bytecode()) {
    case Bytecodes::_fast_aputfield:
      // Store into the field.
      do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
      break;

    case Bytecodes::_fast_iputfield:
      __ stwx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_lputfield:
      __ stdx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_bputfield:
      __ stbx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_sputfield:
      __ sthx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_fputfield:
      __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_dputfield:
      __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
      break;

    default: ShouldNotReachHere();
  }

  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    Label LVolatile;
    __ beq(CR_is_vol, LVolatile);
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ align(32, 12);
    __ bind(LVolatile);
    __ fence();
  }
}

void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);

  Label LisVolatile;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R17_tos,
                 Roffset       = R22_tmp2,
                 Rflags        = R23_tmp3,
                 Rscratch      = R12_scratch2;

  // Constant pool already resolved. Get the field offset.
  __ get_cache_and_index_at_bcp(Rcache, 1);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, false, true);

  // Get the load address.
  __ null_check_throw(Rclass_or_obj, -1, Rscratch);

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  __ bne(CCR0, LisVolatile);

  switch (bytecode()) {
    case Bytecodes::_fast_agetfield:
    {
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_igetfield:
    {
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_lgetfield:
    {
      __ ldx(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ ldx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_bgetfield:
    {
      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
      __ extsb(R17_tos, R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ extsb(R17_tos, R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_cgetfield:
    {
      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_sgetfield:
    {
      __ lhax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lhax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_fgetfield:
    {
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    case Bytecodes::_fast_dgetfield:
    {
      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  Label LisVolatile;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R17_tos,
                 Roffset       = R22_tmp2,
                 Rflags        = R23_tmp3,
                 Rscratch      = R12_scratch2;

  __ ld(Rclass_or_obj, 0, R18_locals);

  // Constant pool already resolved. Get the field offset.
  __ get_cache_and_index_at_bcp(Rcache, 2);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // JVMTI support not needed, since we switch back to single bytecode as soon as the debugger attaches.

  // Needed to report exception at the correct bcp.
  __ addi(R14_bcp, R14_bcp, 1);

  // Get the load address.
  __ null_check_throw(Rclass_or_obj, -1, Rscratch);

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  __ bne(CCR0, LisVolatile);

  switch (state) {
    case atos:
    {
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
      __ verify_oop(R17_tos);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case itos:
    {
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case ftos:
    {
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    default: ShouldNotReachHere();
  }
  __ addi(R14_bcp, R14_bcp, -1);
}

// ============================================================================
// Calls

// Common code for invoke
//
// Input:
//   - byte_no
//
// Output:
//   - Rmethod:   The method to invoke next.
//   - Rret_addr: The return address to return to.
//   - Rindex:    MethodType (invokehandle) or CallSite obj (invokedynamic)
//   - Rrecv:     Cache for "this" pointer, might be noreg if static call.
//   - Rflags:    Method flags from const pool cache.
//
// Kills:
//   - Rscratch1
//
void TemplateTable::prepare_invoke(int byte_no,
                                   Register Rmethod,  // linked method (or i-klass)
                                   Register Rret_addr,// return address
                                   Register Rindex,   // itable index, MethodType, etc.
                                   Register Rrecv,    // If caller wants to see it.
                                   Register Rflags,   // If caller wants to test it.
                                   Register Rscratch
                                   ) {
  // Determine flags.
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (Rrecv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");

  assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
  assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
  assert_different_registers(Rret_addr, Rscratch);

  load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);

  // Saving of SP done in call_from_interpreter.

  // Maybe push "appendix" to arguments.
  if (is_invokedynamic || is_invokehandle) {
    Label Ldone;
    __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
    __ beq(CCR0, Ldone);
    // Push "appendix" (MethodType, CallSite, etc.).
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ load_resolved_reference_at_index(Rscratch, Rindex);
    __ verify_oop(Rscratch);
    __ push_ptr(Rscratch);
    __ bind(Ldone);
  }

  // Load receiver if needed (after appendix is pushed so parameter size is correct).
  if (load_receiver) {
    const Register Rparam_count = Rscratch;
    __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
    __ load_receiver(Rparam_count, Rrecv);
    __ verify_oop(Rrecv);
  }

  // Get return address.
  {
    Register Rtable_addr = Rscratch;
    Register Rret_type   = Rret_addr;
    address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);

    // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
    __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
    __ load_dispatch_table(Rtable_addr, (address*)table_addr);
    __ sldi(Rret_type, Rret_type, LogBytesPerWord);
    // Get return address.
    __ ldx(Rret_addr, Rtable_addr, Rret_type);
  }
}

// Helper for virtual calls. Load target out of vtable and jump off!
// Kills all passed registers.
void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {

  assert_different_registers(Rrecv_klass, Rtemp, Rret);
  const Register Rtarget_method = Rindex;

  // Get target method & entry point.
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  // Calc vtable addr: scale the vtable index by the entry size (8 bytes).
  __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
  // Load target.
  __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
  __ ldx(Rtarget_method, Rindex, Rrecv_klass);
  __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
}
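// In C-like pseudocode, the dispatch above amounts to (names are illustrative;
// the offsets come from InstanceKlass/vtableEntry as used above):
//
//   Method** vtable = (Method**)((char*)recv_klass
//                                + InstanceKlass::vtable_start_offset() * wordSize
//                                + vtableEntry::method_offset_in_bytes());
//   Method* target = vtable[vtable_index];  // each entry is one word
//   goto target's interpreted entry;        // via call_from_interpreter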
// Virtual or final call. Final calls are rewritten on the fly to run through "_fast_invokevfinal" next time.
void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);

  Register Rtable_addr = R11_scratch1,
           Rret_type = R12_scratch2,
           Rret_addr = R5_ARG3,
           Rflags = R22_tmp2,                 // Should survive C call.
           Rrecv = R3_ARG1,
           Rrecv_klass = Rrecv,
           Rvtableindex_or_method = R31,      // Should survive C call.
           Rnum_params = R4_ARG2,
           Rnew_bc = R6_ARG4;

  Label LnotFinal;

  load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
  invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);

  __ align(32, 12);
  __ bind(LnotFinal);
  // Load "this" pointer (receiver).
  __ rldicl(Rnum_params, Rflags, 64, 48);
  __ load_receiver(Rnum_params, Rrecv);
  __ verify_oop(Rrecv);

  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);
  __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
  __ load_klass(Rrecv_klass, Rrecv);
  __ verify_klass_ptr(Rrecv_klass);
  __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);

  generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f2_byte, "use this argument");
  Register Rflags  = R22_tmp2,
           Rmethod = R31;
  load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
  invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {

  assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);

  // Load receiver from stack slot.
  Register Rrecv = Rscratch2;
  Register Rnum_params = Rrecv;

  __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
  __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);

  // Get return address.
  Register Rtable_addr = Rscratch1,
           Rret_addr   = Rflags,
           Rret_type   = Rret_addr;
  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);

  // Load receiver and receiver NULL check.
  __ load_receiver(Rnum_params, Rrecv);
  __ null_check_throw(Rrecv, -1, Rscratch1);

  __ profile_final_call(Rrecv, Rscratch1);

  // Do the call.
void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {

  assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);

  // Load receiver from stack slot.
  Register Rrecv = Rscratch2;
  Register Rnum_params = Rrecv;

  __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
  __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);

  // Get return address.
  Register Rtable_addr = Rscratch1,
           Rret_addr = Rflags,
           Rret_type = Rret_addr;
  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);

  // Load receiver and receiver NULL check.
  __ load_receiver(Rnum_params, Rrecv);
  __ null_check_throw(Rrecv, -1, Rscratch1);

  __ profile_final_call(Rrecv, Rscratch1);

  // Do the call.
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
}

void TemplateTable::invokespecial(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3,
           Rreceiver = R6_ARG4,
           Rmethod = R31;

  prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);

  // Receiver NULL check.
  __ null_check_throw(Rreceiver, -1, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokestatic(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3;

  prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
                                                  Register Rret,
                                                  Register Rflags,
                                                  Register Rindex,
                                                  Register Rtemp1,
                                                  Register Rtemp2) {

  assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
  Label LnotFinal;

  // Check for vfinal.
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  Register Rscratch = Rflags; // Rflags is dead now.

  // Final call case.
  __ profile_final_call(Rtemp1, Rscratch);
  // Do the final call - the index (f2) contains the method.
  __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);

  // Non-final call case.
  __ bind(LnotFinal);
  __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
  generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
}

void TemplateTable::invokeinterface(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  const Register Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rscratch3 = R9_ARG7,
                 Rscratch4 = R10_ARG8,
                 Rtable_addr = Rscratch2,
                 Rinterface_klass = R5_ARG3,
                 Rret_type = R8_ARG6,
                 Rret_addr = Rret_type,
                 Rindex = R6_ARG4,
                 Rreceiver = R4_ARG2,
                 Rrecv_klass = Rreceiver,
                 Rflags = R7_ARG5;

  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);

  // Get receiver klass.
  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
  __ load_klass(Rrecv_klass, Rreceiver);

  // Check corner case: object method.
  Label LobjectMethod;

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  __ btrue(CCR0, LobjectMethod);

  // Fallthrough: The normal invokeinterface case.
  __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);

  // Find entry point to call.
  Label Lthrow_icc, Lthrow_ame;
  // Result will be returned in Rindex.
  __ mr(Rscratch4, Rrecv_klass);
  __ mr(Rscratch3, Rindex);
  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);

  __ cmpdi(CCR0, Rindex, 0);
  __ beq(CCR0, Lthrow_ame);
  // Found entry. Jump off!
  __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);

  // Vtable entry was NULL => Throw abstract method error.
  __ bind(Lthrow_ame);
  __ mr(Rrecv_klass, Rscratch4);
  __ mr(Rindex, Rscratch3);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  // Interface was not found => Throw incompatible class change error.
  __ bind(Lthrow_icc);
  __ mr(Rrecv_klass, Rscratch4);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));

  __ should_not_reach_here();

  // Special case of invokeinterface called for a virtual method of
  // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
  // The invokeinterface was rewritten to an invokevirtual, hence we have
  // to handle this corner case. This code isn't produced by javac, but could
  // be produced by another compliant Java compiler.
  __ bind(LobjectMethod);
  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
}
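// A hedged sketch of what lookup_interface_method scans above (illustrative
// pseudo-C++ with assumed entry names; the authoritative logic lives in
// MacroAssembler::lookup_interface_method):
//
//   for (itable_entry* e = recv_klass->start_of_itable(); ; e++) {
//     if (e->interface() == NULL)            goto Lthrow_icc; // not implemented
//     if (e->interface() == interface_klass) break;           // interface found
//   }
//   Method* target = method_table_at(recv_klass, e->offset())[itable_index];
//   // target == NULL => AbstractMethodError (Lthrow_ame above)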
void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags = R4_ARG2,
                 Rmethod = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);

  // Profile this call.
  __ profile_call(Rscratch1, Rscratch2);

  // Off we go. With the new method handles, we don't jump to a method handle
  // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which
  // happens to be the call site object the bootstrap method returned. This is
  // passed to a "link" method which does the dispatch (most likely it just
  // grabs the MH stored inside the call site and does an invokehandle).
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags = R4_ARG2,
                 Rrecv = R5_ARG3,
                 Rmethod = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
  __ verify_method_ptr(Rmethod);
  __ null_check_throw(Rrecv, -1, Rscratch2);

  __ profile_final_call(Rrecv, Rscratch1);

  // Still no call from handle => We call the method handle interpreter here.
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}
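// A hedged, Java-level sketch of the invokedynamic linkage described above
// (illustrative only; the linker name is an assumption, not VM code): the
// appendix that prepare_invoke pushed sits on top of the regular arguments,
// so the resolved linker method sees
//
//   link(arg0, ..., argN, appendix /* CallSite/MethodType */)
//
// and can dispatch through the method handle stored in the call site without
// any per-call-site code generation in the interpreter.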
// =============================================================================
// Allocation

// Puts allocated obj ref onto the expression stack.
void TemplateTable::_new() {
  transition(vtos, atos);

  Label Lslow_case,
        Ldone,
        Linitialize_header,
        Lallocate_shared,
        Linitialize_object; // Including clearing the fields.

  const Register RallocatedObject = R17_tos,
                 RinstanceKlass = R9_ARG7,
                 Rscratch = R11_scratch1,
                 Roffset = R8_ARG6,
                 Rinstance_size = Roffset,
                 Rcpool = R4_ARG2,
                 Rtags = R3_ARG1,
                 Rindex = R5_ARG3;

  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;

  // --------------------------------------------------------------------------
  // Check if fast case is possible.

  // Load pointers to const pool and const pool's tags array.
  __ get_cpool_and_tags(Rcpool, Rtags);
  // Load index of constant pool entry.
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  if (UseTLAB) {
    // Make sure the class we're about to instantiate has been resolved.
    // This is done before loading the InstanceKlass to be consistent with the
    // order in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
    __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
    __ lbzx(Rtags, Rindex, Rtags);

    __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
    __ bne(CCR0, Lslow_case);

    // Get InstanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
    __ sldi(Roffset, Rindex, LogBytesPerWord);
    __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
    __ isync(); // Order load of instance Klass wrt. tags.
    __ ldx(RinstanceKlass, Roffset, Rscratch);

    // Make sure klass is fully initialized and get instance_size.
    __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
    __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);

    __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
    // Make sure the klass has no finalizer and is neither abstract, an interface,
    // nor java/lang/Class; all of these set the slow path bit in the layout helper.
    __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?

    __ crnand(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // slow path bit set or not fully initialized?
    __ beq(CCR0, Lslow_case);

    // --------------------------------------------------------------------------
    // Fast case:
    // Allocate the instance.
    // 1) Try to allocate in the TLAB.
    // 2) If fail, and the TLAB is not full enough to discard, allocate in the shared Eden.
    // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).

    Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
    Register RnewTopValue = R6_ARG4;
    Register RendValue = R7_ARG5;

    // Check if we can allocate in the TLAB.
    __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
    __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);

    __ add(RnewTopValue, Rinstance_size, RoldTopValue);

    // If there is enough space, we do not CAS and do not clear.
    __ cmpld(CCR0, RnewTopValue, RendValue);
    __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);

    __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);

    if (ZeroTLAB) {
      // The fields have already been cleared.
      __ b(Linitialize_header);
    } else {
      // Initialize both the header and fields.
      __ b(Linitialize_object);
    }
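    // A hedged sketch of the TLAB fast path just emitted (illustrative C, not
    // the generated code):
    //
    //   HeapWord* old_top = thread->tlab_top();
    //   HeapWord* new_top = old_top + instance_size;
    //   if (new_top <= thread->tlab_end()) {
    //     thread->set_tlab_top(new_top); // no CAS needed, TLAB is thread-local
    //     obj = old_top;                 // continue with header/field init
    //   }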
    // Fall through: TLAB was too small.
    if (allow_shared_alloc) {
      Register RtlabWasteLimitValue = R10_ARG8;
      Register RfreeValue = RnewTopValue;

      __ bind(Lallocate_shared);
      // Check if tlab should be discarded (refill_waste_limit >= free).
      __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
      __ subf(RfreeValue, RoldTopValue, RendValue);
      __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
      __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
      __ bge(CCR0, Lslow_case);

      // Increment waste limit to prevent getting stuck on this slow path.
      __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
      __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
    }
    // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
  }
  // else: Always go the slow path.

  // --------------------------------------------------------------------------
  // Slow case.
  __ bind(Lslow_case);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);

  if (UseTLAB) {
    __ b(Ldone);
    // --------------------------------------------------------------------------
    // Init1: Zero out newly allocated memory.

    if (!ZeroTLAB || allow_shared_alloc) {
      // Clear object fields.
      __ bind(Linitialize_object);

      // Initialize remaining object fields. Round the field size in bytes up
      // to doublewords; clear_memory_doubleword takes a doubleword count.
      Register Rbase = Rtags;
      __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
      __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
      __ srdi(Rinstance_size, Rinstance_size, 3);

      // Clear out object skipping header. Also takes care of the zero length case.
      __ clear_memory_doubleword(Rbase, Rinstance_size);
      // fallthru: __ b(Linitialize_header);
    }

    // --------------------------------------------------------------------------
    // Init2: Initialize the header: mark, klass
    __ bind(Linitialize_header);

    // Init mark.
    if (UseBiasedLocking) {
      __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
    } else {
      __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
    }
    __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);

    // Init klass.
    __ store_klass_gap(RallocatedObject);
    __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)

    // Check and trigger dtrace event.
    {
      SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
      __ push(atos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
      __ pop(atos);
    }
  }

  // continue
  __ bind(Ldone);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}
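// After the fast path above, the new object looks like this (a hedged sketch
// of the standard oop layout; exact offsets depend on UseCompressedOops etc.):
//
//   [mark word   ]  <- prototype mark (or klass->prototype_header with biased locking)
//   [klass (+gap)]  <- stored last so a concurrent collector never sees a stale klass
//   [fields...   ]  <- zeroed either via ZeroTLAB or clear_memory_doubleword
//
// The trailing StoreStore membar keeps these initializing stores from being
// reordered with the store that publishes the reference.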
void TemplateTable::newarray() {
  transition(itos, atos);

  // The element type code is the unsigned byte after the bytecode; tos holds the length.
  __ lbz(R4, 1, R14_bcp);
  __ extsw(R5, R17_tos);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4 /* type */, R5 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  __ get_constant_pool(R4);
  __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
  __ extsw(R6, R17_tos); // size
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

// Allocate a multi dimensional array.
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register Rptr = R31; // Needs to survive C call.

  // Load the number of dimensions and compute ndims * wordSize.
  __ lbz(Rptr, 3, R14_bcp);
  __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
  // Esp points past last_dim, so set R4 to the first_dim address.
  __ add(R4, Rptr, R15_esp);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
  // Pop all dimensions off the stack.
  __ add(R15_esp, Rptr, R15_esp);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}
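// A hedged sketch of the multianewarray calling convention above (illustrative
// C, not VM code): only a pointer to the first (outermost) dimension is passed;
// the runtime reads all ndims sizes directly off the expression stack:
//
//   int   ndims = bcp[3];          // dimensions operand byte after the cp index
//   jint* first = esp + ndims;     // esp points past the last dimension word
//   oop   arr   = InterpreterRuntime::multianewarray(thread, first);
//   esp        += ndims;           // pop all dimension words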
void TemplateTable::arraylength() {
  transition(atos, itos);

  __ verify_oop(R17_tos);
  __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
  __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
}

// ============================================================================
// Typechecks

void TemplateTable::checkcast() {
  transition(atos, atos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset = R6_ARG4,
           RobjKlass = R4_ARG2,
           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
           Rcpool = R11_scratch1,
           Rtags = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" the bytecode (quicken_io_cc serves both checkcast and instanceof).
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the checkcast.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone if the receiver's klass is
  // a subtype; falls through on failure.
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);

  // Not a subtype, so we must throw a ClassCastException.
  // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}

// Output:
// - tos == 0: Obj was null or not an instance of the class.
// - tos == 1: Obj was an instance of the class.
void TemplateTable::instanceof() {
  transition(atos, itos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset = R5_ARG3,
           RobjKlass = R4_ARG2,
           RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect the value in this register.
           Rcpool = R11_scratch1,
           Rtags = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" instanceof.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the subtype check.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone with tos == 1 if the
  // receiver's klass is a subtype; falls through and sets tos == 0 on failure.
  __ li(R17_tos, 1);
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
  __ li(R17_tos, 0);

  if (ProfileInterpreter) {
    __ b(Ldone);
  }

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}
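// A hedged sketch of the shared checkcast/instanceof logic above (illustrative
// C++, not VM code):
//
//   if (obj == NULL)  { /* checkcast passes; instanceof yields 0 */ }
//   else if (tags[cp_index] != JVM_CONSTANT_Class)
//     klass = quicken_io_cc();        // resolve via the VM, result in vm_result_2
//   else
//     klass = klass_slot(cpool, cp_index);  // already quickened
//   bool ok = obj->klass()->is_subtype_of(klass);
//   // checkcast: !ok => ClassCastException;  instanceof: tos = ok ? 1 : 0;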
// =============================================================================
// Breakpoints

void TemplateTable::_breakpoint() {
  transition(vtos, vtos);

  // Get the unpatched byte code.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
  __ mr(R31, R3_RET);

  // Post the breakpoint event.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);

  // Complete the execution of the original bytecode.
  __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
}

// =============================================================================
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // The exception oop is in tos.
  __ verify_oop(R17_tos);

  __ null_check_throw(R17_tos, -1, R11_scratch1);

  // The throw-exception interpreter entry expects the exception oop to be in R3.
  __ mr(R3_RET, R17_tos);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
  __ mtctr(R11_scratch1);
  __ bctr();
}

// =============================================================================
// Synchronization
// Searches the basic object lock list on the stack for a free slot
// and uses it to lock the object in tos.
//
// Recursive locking is enabled by exiting the search if the same
// object is already found in the list. Thus, a new basic object lock
// is allocated "higher up" in the stack and is therefore found first
// at the next monitor exit.
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  __ verify_oop(R17_tos);

  Register Rcurrent_monitor = R11_scratch1,
           Rcurrent_obj = R12_scratch2,
           Robj_to_lock = R17_tos,
           Rscratch1 = R3_ARG1,
           Rscratch2 = R4_ARG2,
           Rscratch3 = R5_ARG3,
           Rcurrent_obj_addr = R6_ARG4;

  // ------------------------------------------------------------------------------
  // Null pointer exception.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until monitorenter returns true).

  // ------------------------------------------------------------------------------
  // Find a free slot in the monitor block.
  Label Lfound, Lexit, Lallocate_new;
  ConditionRegister found_free_slot = CCR0,
                    found_same_obj = CCR1,
                    reached_limit = CCR6;
  {
    Label Lloop;
    Register Rlimit = Rcurrent_monitor;

    // Set up search loop - start with topmost monitor.
    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());

    __ ld(Rlimit, 0, R1_SP);
    __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base

    // Check if any slot is present => short cut to allocation if not.
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ bgt(reached_limit, Lallocate_new);

    // Pre-load topmost slot.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // The search loop.
    __ bind(Lloop);
    // Found free slot?
    __ cmpdi(found_free_slot, Rcurrent_obj, 0);
    // Is this entry for the same obj? If so, stop the search and take the found
    // free slot or allocate a new one to enable recursive locking.
    __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ beq(found_free_slot, Lexit);
    __ beq(found_same_obj, Lallocate_new);
    __ bgt(reached_limit, Lallocate_new);
    // Check if the last allocated BasicObjectLock has been reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ b(Lloop);
  }
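  // In rough pseudo-code, the search loop above does (a hedged sketch):
  //
  //   for (slot = monitor_top; slot <= monitor_base; slot += monitor_size) {
  //     if (slot->obj == NULL)        goto Lexit;          // free slot found
  //     if (slot->obj == obj_to_lock) goto Lallocate_new;  // recursive lock => new slot
  //   }
  //   goto Lallocate_new;                                  // no free slot at all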
  // ------------------------------------------------------------------------------
  // Check if we found a free slot.
  __ bind(Lexit);

  __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
  __ b(Lfound);

  // We didn't find a free BasicObjLock => allocate one.
  __ align(32, 12);
  __ bind(Lallocate_new);
  __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
  __ mr(Rcurrent_monitor, R26_monitor);
  __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());

  // ------------------------------------------------------------------------------
  // We now have a slot to lock.
  __ bind(Lfound);

  // Increment bcp to point to the next bytecode, so exception handling for async.
  // exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ addi(R14_bcp, R14_bcp, 1);

  __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
  __ lock_object(Rcurrent_monitor, Robj_to_lock);

  // Check if there's enough space on the stack for the monitors after locking.
  Label Lskip_stack_check;
  // Optimization: If the monitors stack section is less than a standard page size (4K),
  // don't run the stack check. There should be enough shadow pages to fit that in.
  __ ld(Rscratch3, 0, R1_SP);
  __ sub(Rscratch3, Rscratch3, R26_monitor);
  __ cmpdi(CCR0, Rscratch3, 4*K);
  __ blt(CCR0, Lskip_stack_check);

  DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
  __ li(Rscratch1, 0);
  __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);

  __ align(32, 12);
  __ bind(Lskip_stack_check);

  // The bcp has already been incremented. Just need to dispatch to the next instruction.
  __ dispatch_next(vtos);
}

void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(R17_tos);

  Register Rcurrent_monitor = R11_scratch1,
           Rcurrent_obj = R12_scratch2,
           Robj_to_lock = R17_tos,
           Rcurrent_obj_addr = R3_ARG1,
           Rlimit = R4_ARG2;
  Label Lfound, Lillegal_monitor_state;

  // Check corner case: unbalanced monitorEnter / Exit.
  __ ld(Rlimit, 0, R1_SP);
  __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base

  // Null pointer check.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  __ cmpld(CCR0, R26_monitor, Rlimit);
  __ bgt(CCR0, Lillegal_monitor_state);

  // Find the corresponding slot in the monitors stack section.
  {
    Label Lloop;

    // Start with topmost monitor.
    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
    __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    __ bind(Lloop);
    // Is this entry for the same obj?
    __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
    __ beq(CCR0, Lfound);

    // Check if the last allocated BasicObjectLock has been reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ ble(CCR0, Lloop);
  }

  // Fell through without finding the basic obj lock => throw up!
  __ bind(Lillegal_monitor_state);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  __ align(32, 12);
  __ bind(Lfound);
  __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
          -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ unlock_object(Rcurrent_monitor);
}
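// A hedged sketch of the interpreter frame region both monitor routines walk
// (illustrative; see frame_ppc.hpp for the authoritative layout). The monitor
// area grows toward lower addresses as slots are added:
//
//   caller_sp ->  [ijava_state                 ]
//                 [monitor 0 (pushed first)    ]  <- monitor base (Rlimit)
//                 [...                         ]
//                 [monitor n (pushed last)     ]  <- R26_monitor (topmost)
//   R1_SP     ->  [abi / expression stack ...  ]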
// ============================================================================
// Wide bytecodes

// Wide instructions. Simply redirects to the wide entry point for that instruction.
void TemplateTable::wide() {
  transition(vtos, vtos);

  const Register Rtable = R11_scratch1,
                 Rindex = R12_scratch2,
                 Rtmp = R0;

  __ lbz(Rindex, 1, R14_bcp);

  __ load_dispatch_table(Rtable, Interpreter::_wentry_point);

  __ slwi(Rindex, Rindex, LogBytesPerWord);
  __ ldx(Rtmp, Rtable, Rindex);
  __ mtctr(Rtmp);
  __ bctr();
  // Note: the bcp increment step is part of the individual wide bytecode implementations.
}
#endif // !CC_INTERP