/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val
// index can be noreg,
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1BarrierSet:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        // G1 barrier needs uncompressed oop for region cross check.
        Register new_val = val;
        if (UseCompressedOops && val != G0) {
          new_val = tmp;
          __ mov(val, new_val);
        }

        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, new_val, tmp);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
      {
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

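// Conceptually, the G1 case above emits roughly the following
// (pseudocode only; enqueue/crosses_region stand in for the barrier
// details hidden inside the g1_write_barrier_* macros):
//
//   pre_val = *addr;                              // SATB pre-barrier
//   if (marking_active && pre_val != NULL) enqueue(pre_val);
//   *addr = val;                                  // the store itself
//   if (val != NULL && crosses_region(addr, val))
//     dirty_card_and_enqueue(addr);               // post-barrier
//
// The CardTableModRef case keeps only the store and the card mark.
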
//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test Method* flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
      {
        // We skip bytecode quickening for putfield instructions when
        // the put_code written to the constant pool cache is zero.
        // This is required so that every execution of this instruction
        // calls out to InterpreterRuntime::resolve_get_put to do
        // additional, required work.
        assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
        assert(load_bc_into_bc_reg, "we use bc_reg as temp");
        __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
        __ set(bc, bc_reg);
        __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
      }
      break;
    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ set(bc, bc_reg);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

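// The net effect of patch_bytecode is roughly:
//
//   if (code_at(bcp) == java_code(bc))  // not breakpointed, not already patched
//     *bcp = bc;                        // install the fast variant
//
// guarded by the putfield-resolution and JVMTI breakpoint checks above.
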
//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
  __ set(value, Otos_l);
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch (value) {
    default: ShouldNotReachHere();
    case 0:  p = &zero;  break;
    case 1:  p = &one;   break;
    case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch (value) {
    default: ShouldNotReachHere();
    case 0:  p = &zero;  break;
    case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb(at_bcp(1), Otos_i);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);  // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ stop("string should be rewritten to fast_aldc");
  __ ba_short(exit);

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  assert_different_registers(Otos_i, G3_scratch);
  __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size);  // load index => G3_scratch
  __ load_resolved_reference_at_index(Otos_i, G3_scratch);
  __ tst(Otos_i);
  __ br(Assembler::notEqual, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ call_VM(Otos_i, entry, O1);
  __ bind(resolved);
  __ verify_oop(Otos_i);
}

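// The fast path above is, in rough pseudocode:
//
//   oop obj = resolved_references[cache_index];
//   if (obj == NULL)
//     obj = InterpreterRuntime::resolve_ldc(...);  // slow call, fills the cache
//
// so after the first execution the constant costs one indexed load.
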
void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, exit;

  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
  __ ldx(G3_scratch, base_offset, Otos_l);
  __ push(ltos);

  __ bind(exit);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub(at_bcp(offset), reg);
}

void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  // Rewrite iload,iload pair into fast_iload2,
  // iload,caload pair into fast_icaload.
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}

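// The rewriting above implements, roughly:
//
//   next = code_at(bcp + 1);
//   if      (next == _iload)      ;  // wait; only the last iload of a run is rewritten
//   else if (next == _fast_iload) patch to _fast_iload2;   // iload,iload pair
//   else if (next == _caload)     patch to _fast_icaload;  // iload,caload pair
//   else                          patch to _fast_iload;    // lone iload, don't check again
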
void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int(G3_scratch, Otos_i);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long(G3_scratch, Otos_l);
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long(G3_scratch, Otos_l);
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr(G3_scratch, Otos_i);
  __ verify_oop(Otos_i);
}

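// The array templates below all go through index_check, which does roughly:
//
//   if ((juint)index >= (juint)array->length())
//     throw ArrayIndexOutOfBoundsException;
//   result = array + (index << log2_element_size);  // element address base
//
// (the array null check is taken implicitly when the length is loaded).
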
void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld(Llocals, Interpreter::local_offset_in_bytes(n), Otos_i);
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f);
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr(Llocals, Interpreter::local_offset_in_bytes(n), Otos_i);
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // if _getfield then wait with rewrite
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int(G3_scratch, Otos_i);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long(G3_scratch, Otos_l);
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int(G3_scratch, Otos_i);
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long(G3_scratch, Otos_l);
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2);  // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2);  // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2);  // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}

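// aastore below must implement the Java-level store check, roughly:
//
//   if (value == NULL)                                  array[index] = NULL;   // no check, but profiled
//   else if (value->klass() <: array->element_klass())  array[index] = value;  // plus GC write barrier
//   else                                                throw ArrayStoreException;
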
void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);      // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);  // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short(Otos_i, Assembler::pn, is_null);

  __ load_klass(O3, O4);      // get array klass
  __ load_klass(Otos_i, O5);  // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i: value
  // O1: addr - offset
  // O2: index
  // O3: array
  // O4: array element klass
  // O5: value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check. Branch to store_ok if no
  // failure. Throw if failure.
  __ gen_subtype_check(O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok);

  // Not a subtype; so must throw exception
  __ throw_if_not_x(Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch);

  // Store is OK.
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)
  __ bind(done);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O2: index
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(O3, G4_scratch);
  __ ld(G4_scratch, in_bytes(Klass::layout_helper_offset()), G4_scratch);
  __ set(Klass::layout_helper_boolean_diffbit(), G3_scratch);
  __ andcc(G3_scratch, G4_scratch, G0);
  Label L_skip;
  __ br(Assembler::zero, false, Assembler::pn, L_skip);
  __ delayed()->nop();
  __ and3(Otos_i, 1, Otos_i);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (long and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
    case add:  __ add(O1, Otos_i, Otos_i);   break;
    case sub:  __ sub(O1, Otos_i, Otos_i);   break;
    // %%%%% Mul may not exist: better to call .mul?
    case mul:  __ smul(O1, Otos_i, Otos_i);  break;
    case _and: __ and3(O1, Otos_i, Otos_i);  break;
    case _or:  __ or3(O1, Otos_i, Otos_i);   break;
    case _xor: __ xor3(O1, Otos_i, Otos_i);  break;
    case shl:  __ sll(O1, Otos_i, Otos_i);   break;
    case shr:  __ sra(O1, Otos_i, Otos_i);   break;
    case ushr: __ srl(O1, Otos_i, Otos_i);   break;
    default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
    case add:  __ add(O2, Otos_l, Otos_l);   break;
    case sub:  __ sub(O2, Otos_l, Otos_l);   break;
    case _and: __ and3(O2, Otos_l, Otos_l);  break;
    case _or:  __ or3(O2, Otos_l, Otos_l);   break;
    case _xor: __ xor3(O2, Otos_l, Otos_l);  break;
    default: ShouldNotReachHere();
  }
}

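// idiv below must match Java semantics, which are roughly:
//
//   if (divisor == 0)                            throw ArithmeticException;
//   if (dividend == 0x80000000 && divisor == -1) result = dividend;  // overflow wraps to min_int
//   else                                         result = dividend / divisor;
//
// The min_int/-1 case is filtered out first because 32-bit sdiv does not
// produce the wrapped Java result on overflow.
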
void TemplateTable::idiv() {
  // %%%%% Later: For SPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1);  // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);  // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i);  // note: irem uses O1 after this instruction!
  __ bind(done);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2);  // save divisor
  idiv();              // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}

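// irem above (and lrem further down) rely on the identity
//
//   a % b == a - (a / b) * b   // e.g. -7 % 2 == -7 - (-3)*2 == -1
//
// which, with truncating division, also yields the Java-mandated sign of
// the remainder (that of the dividend).
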
void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
  __ mulx(Otos_l, O2, Otos_l);
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
  __ tst(Otos_l);
  __ throw_if_not_xcc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
  __ tst(Otos_l);
  __ throw_if_not_xcc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx(Otos_l2, Otos_l, Otos_l2);
  __ sub(O2, Otos_l2, Otos_l);
}


void TemplateTable::lshl() {
  transition(itos, ltos);  // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);  // shift value in O2, O3
  __ sllx(O2, Otos_i, Otos_l);
}


void TemplateTable::lshr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
  __ srax(O2, Otos_i, Otos_l);
}


void TemplateTable::lushr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
  __ srlx(O2, Otos_i, Otos_l);
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
    case add: __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
    case sub: __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
    case mul: __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
    case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
    case rem:
      assert(Ftos_f == F0, "just checking");
      // LP64 calling conventions use F1, F3 for passing 2 floats
      __ pop_f(F1);
      __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      assert(Ftos_f == F0, "fix this code");
      break;

    default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
    case add: __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
    case sub: __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
    case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
    case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
    case rem:
      // Pass arguments in D0, D2
      __ fmov(FloatRegisterImpl::D, Ftos_f, F2);
      __ pop_d(F0);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      assert(Ftos_d == F0, "fix this code");
      break;

    default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ sub(G0, Otos_l, Otos_l);
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}

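// iinc above and wide_iinc below are simply, in Java terms:
//
//   locals[index] += constant;  // signed 8-bit (iinc) or 16-bit (wide iinc) immediate
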
void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp(4, O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default: ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default: ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif


  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      // Sign extend the 32 bits
      __ sra(Otos_i, 0, Otos_l);
      break;

    case Bytecodes::_i2f:
      __ st(Otos_i, __ d_tmp);
      __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
      __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
      break;

    case Bytecodes::_i2d:
      __ st(Otos_i, __ d_tmp);
      __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
      __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
      break;

    case Bytecodes::_i2b:
      __ sll(Otos_i, 24, Otos_i);
      __ sra(Otos_i, 24, Otos_i);
      break;

    case Bytecodes::_i2c:
      __ sll(Otos_i, 16, Otos_i);
      __ srl(Otos_i, 16, Otos_i);
      break;

    case Bytecodes::_i2s:
      __ sll(Otos_i, 16, Otos_i);
      __ sra(Otos_i, 16, Otos_i);
      break;

    case Bytecodes::_l2i:
      // Sign-extend into the high 32 bits
      __ sra(Otos_l, 0, Otos_i);
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      __ st_long(Otos_l, __ d_tmp);
      __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

      if (bytecode() == Bytecodes::_l2f) {
        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
      } else {
        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
      }
      break;

    case Bytecodes::_f2i: {
        Label isNaN;
        // result must be 0 if value is NaN; test by comparing value to itself
        __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
        __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
        __ delayed()->clr(Otos_i);  // NaN
        __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
        __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
        __ ld(__ d_tmp, Otos_i);
        __ bind(isNaN);
      }
      break;

    case Bytecodes::_f2l:
      // must uncache tos
      __ push_f();
      __ pop_f(F1);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
      break;

    case Bytecodes::_f2d:
      __ ftof(FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_d2l:
      // must uncache tos
      __ push_d();
      // LP64 calling conventions pass first double arg in D0
      __ pop_d(Ftos_d);
      __ call_VM_leaf(Lscratch,
                      bytecode() == Bytecodes::_d2i
                        ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
                        : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
      break;

    case Bytecodes::_d2f:
      __ ftof(FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

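// A worked example for the shift-based narrowings in convert() above,
// with x = 0x1234ABCD:
//
//   i2b: (x << 24) >> 24   (arithmetic)  -> 0xFFFFFFCD  (byte  -51, sign-extended)
//   i2c: (x << 16) >>> 16  (logical)     -> 0x0000ABCD  (char, zero-extended)
//   i2s: (x << 16) >> 16   (arithmetic)  -> 0xFFFFABCD  (short, sign-extended)
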
void TemplateTable::lcmp() {
  transition(ltos, itos);

  __ pop_l(O1);  // pop off value 1, value 2 is in O0
  __ lcmp(O1, Otos_l, Otos_i);
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");

  __ float_cmp(is_float, unordered_result, F2, F0, Otos_i);
}

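// branch() below treats backward branches specially; the flow is roughly:
//
//   if (disp > 0) goto take_branch;          // forward branch: no counting
//   bump backedge counter (in the MDO if present, else in MethodCounters);
//   if (counter + increment overflows the mask) {
//     nm = InterpreterRuntime::frequency_counter_overflow(branch_bcp);
//     if (nm != NULL && nm is still in_use)
//       migrate the frame off the stack and jump to the OSR entry;
//   }
//   take_branch: Lbcp += disp; dispatch_next();
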
void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide) __ get_4_byte_integer_at_bcp(1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else         __ get_2_byte_integer_at_bcp(1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos, 0, true);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register l_cur_bcp = Lscratch;
  __ mov(Lbcp, l_cur_bcp);

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    Label Lforward;
    // check branch direction
    __ br(Assembler::positive, false, Assembler::pn, Lforward);
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add(O1_disp, Lbcp, Lbcp);  // add to bc addr

    const Register G3_method_counters = G3_scratch;
    __ get_method_counters(Lmethod, G3_method_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                   (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in MethodCounters*
      __ bind(Lno_mdo);
      Address backedge_counter(G3_method_counters,
                               in_bytes(MethodCounters::backedge_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
      Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                 (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ldub(O0, nmethod::state_offset(), O2);
      __ cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else { // not TieredCompilation
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else {
    // Bump bytecode pointer by displacement (take the branch)
    __ add(O1_disp, Lbcp, Lbcp);  // add to bc addr
  }

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos, 0, true);
}


// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp(Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result. The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  { Label zzz;
    __ set(65536, G3_scratch);
    __ cmp(Otos_i, G3_scratch);
    __ bp(Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
    __ delayed()->nop();
    __ stop("BCI is in the wrong register half?");
    __ bind(zzz);
  }
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);  // low bound
  __ ld(O1, 2 * BytesPerInt, O3);  // high bound
  // Sign extend the 32 bits
  __ sra(Otos_i, 0, Otos_i);

  // check against lo & hi
  __ cmp(Otos_i, O2);
  __ br(Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp(Otos_i, O3);
  __ br(Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2);  // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos, 0, true);
}

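// For reference, the 4-byte-aligned tableswitch operands decoded above are:
//
//   default_offset : 4 bytes
//   lo, hi         : 4 bytes each
//   offset[hi-lo+1]: 4 bytes per key
//
// i.e. pc += (lo <= key && key <= hi) ? offset[key - lo] : default_offset.
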
void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2);  // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3);     // set first pair addr
  __ ba(loop_entry);
  __ delayed()->add(O3, O2, O2);       // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4);  // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4);  // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba_short(continue_execution);
  }

  // entry found -> get offset
  __ bind(found);
  if (ProfileInterpreter) {
    __ sub(O3, O1, O3);
    __ sub(O3, 2 * BytesPerInt, O3);
    __ srl(O3, LogBytesPerInt + 1, O3);  // in word-pairs
    __ profile_switch_case(O3, O1, O2, G3_scratch);

    __ bind(continue_execution);
  }
  __ add(Lbcp, O4, Lbcp);
  __ dispatch_next(vtos, 0, true);
}

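// Both lookupswitch implementations walk the same aligned operand layout:
//
//   default_offset : 4 bytes
//   npairs         : 4 bytes
//   (match, offset): npairs pairs of 4 bytes each, sorted by match
//
// fast_linearswitch above scans the pairs in order; fast_binaryswitch
// below binary-searches them.
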


void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (inexisting)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  assert(Otos_i == O0, "alias checking");
  const Register Rkey     = Otos_i;                    // already set (tosca)
  const Register Rarray   = O1;
  const Register Ri       = O2;
  const Register Rj       = O3;
  const Register Rh       = O4;
  const Register Rscratch = O5;

  const int log_entry_size = 3;
  const int entry_size = 1 << log_entry_size;

  Label found;
  // Find Array start
  __ add(Lbcp, 3 * BytesPerInt, Rarray);
  __ and3(Rarray, -BytesPerInt, Rarray);
  // initialize i & j (in delay slot)
  __ clr( Ri );

  // and start
  Label entry;
  __ ba(entry);
  __ delayed()->ld( Rarray, -BytesPerInt, Rj);
  // (Rj is already in the native byte-ordering.)

  // binary search loop
  { Label loop;
    __ bind( loop );
    // int h = (i + j) >> 1;
    __ sra( Rh, 1, Rh );
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sll( Rh, log_entry_size, Rscratch );
    __ ld( Rarray, Rscratch, Rscratch );
    // (Rscratch is already in the native byte-ordering.)
    __ cmp( Rkey, Rscratch );
    __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
    __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())

    // while (i+1 < j)
    __ bind( entry );
    __ add( Ri, 1, Rscratch );
    __ cmp(Rscratch, Rj);
    __ br( Assembler::less, true, Assembler::pt, loop );
    __ delayed()->add( Ri, Rj, Rh ); // start h = i + j; the >> 1 is done at the loop head
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mov( Ri, Rh );           // Save index in i for profiling
  }
  __ sll( Ri, log_entry_size, Ri );
  __ ld( Rarray, Ri, Rscratch );
  // (Rscratch is already in the native byte-ordering.)
  __ cmp( Rkey, Rscratch );
  __ br( Assembler::notEqual, true, Assembler::pn, default_case );
  __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j

  // entry found -> j = offset
  __ inc( Ri, BytesPerInt );
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ ld( Rarray, Ri, Rj );
  // (Rj is already in the native byte-ordering.)

  if (ProfileInterpreter) {
    __ ba_short(continue_execution);
  }

  __ bind(default_case); // fall through (if not profiling)
  __ profile_switch_default(Ri);

  __ bind(continue_execution);
  __ add( Lbcp, Rj, Lbcp );
  __ dispatch_next(vtos, 0, true);
}
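
// A note on the search loop above (not in the original source): the two
// movcc instructions implement the if/else with conditional moves instead
// of a branch, i.e. roughly
//   j = (key <  a[h].match) ? h : j;
//   i = (key >= a[h].match) ? h : i;
// which keeps the inner loop free of hard-to-predict branches.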


void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(), "inconsistent calls_vm information");

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    __ mov(G0, G3_scratch);
    __ access_local_ptr(G3_scratch, Otos_i);
    __ load_klass(Otos_i, O2);
    __ set(JVM_ACC_HAS_FINALIZER, G3);
    __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
    __ andcc(G3, O2, G0);
    Label skip_register_finalizer;
    __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
    __ delayed()->nop();

    // Call out to do finalizer registration
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);

    __ bind(skip_register_finalizer);
  }

  if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
    Label no_safepoint;
    __ ldx(Address(G2_thread, Thread::polling_page_offset()), G3_scratch, 0);
    __ btst(SafepointMechanism::poll_bit(), G3_scratch);
    __ br(Assembler::zero, false, Assembler::pt, no_safepoint);
    __ delayed()->nop();
    __ push(state);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
    __ pop(state);
    __ bind(no_safepoint);
  }

  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
  if (state == itos) {
    __ narrow(Otos_i);
  }
  __ remove_activation(state, /* throw_monitor_exception */ true);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();                           // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);
}


// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized with respect to each other.
//     ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
//     the read float up to before the read.  It's OK for non-volatile memory
//     refs that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory
//     refs that happen BEFORE the write float down to after the write.  It's
//     OK for non-volatile memory refs that happen after the volatile write to
//     float up before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
  // Helper function to emit a memory barrier where one is needed;
  // callers perform the is-volatile test themselves.
  // All current SPARC implementations run in TSO, needing only StoreLoad.
  if ((order_constraint & Assembler::StoreLoad) == 0) return;
  __ membar( order_constraint );
}
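
// Putting the policy above into pseudo-assembly (an illustration, not code
// emitted verbatim anywhere):
//   volatile load:    ld [addr], r;  membar(LoadLoad|LoadStore)
//   volatile store:   membar(LoadStore|StoreStore);  st r, [addr];
//                     membar(StoreLoad)
// Under TSO only the StoreLoad barrier has any effect, so
// volatile_barrier() drops everything else; the getfield/putfield code
// below still computes the full masks and lets membar_has_effect() decide.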

// ----------------------------------------------------------------------------
void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register Rcache,
                                            Register index,
                                            size_t index_size) {
  // Depends on cpCacheOop layout!

  Label resolved;
  Bytecodes::Code code = bytecode();
  switch (code) {
  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  default: break;
  }

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
  __ cmp(Lbyte_code, code);  // have we resolved this bytecode?
  __ br(Assembler::equal, false, Assembler::pt, resolved);
  __ delayed()->set(code, O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  // first time invocation - must resolve first
  __ call_VM(noreg, entry, O1);
  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  __ bind(resolved);
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {
  // Uses both G3_scratch and G4_scratch
  Register cache = G3_scratch;
  Register index = G4_scratch;
  assert_different_registers(cache, method, itable_index);

  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
      ConstantPoolCache::base_offset() +
      ((byte_no == f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()
      )
    );
  const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  if (is_invokevfinal) {
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ ld_ptr(Address(cache, method_offset), method);
  } else {
    size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
    resolve_cache_and_index(byte_no, cache, index, index_size);
    __ ld_ptr(Address(cache, method_offset), method);
  }

  if (itable_index != noreg) {
    // pick up itable or appendix index from f2 also:
    __ ld_ptr(Address(cache, index_offset), itable_index);
  }
  __ ld_ptr(Address(cache, flags_offset), flags);
}

// The Rcache register must be set before call
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register index,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static) {
  assert_different_registers(Rcache, Rflags, Roffset);

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  if (is_static) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ld_ptr( Robj, mirror_offset, Robj);
    __ resolve_oop_handle(Robj);
  }
}
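
// Shape of a ConstantPoolCacheEntry as used by the accessors above (a
// rough sketch from memory; cpCache.hpp holds the authoritative field
// order and bit encodings):
//   _indices   original cp index plus the resolved bytecode codes
//   _f1        metadata word, e.g. the holder Klass* for static fields
//   _f2        field offset (get/put), or vtable index / Method* (invokes)
//   _flags     tos state, parameter size, is_volatile/is_vfinal/... bits
// For field bytecodes: f2 = offset, flags >> tos_state_shift = field type.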

// The registers Rcache and index are expected to be set before call.
// Correct values of the Rcache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register Rcache,
                                            Register index,
                                            bool is_static,
                                            bool has_tos) {
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
    __ load_contents(get_field_access_count_addr, G1_scratch);
    __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);

    __ add(Rcache, in_bytes(cp_base_offset), Rcache);

    if (is_static) {
      __ clr(Otos_i);
    } else {
      if (has_tos) {
        // save object pointer before call_VM() clobbers it
        __ push_ptr(Otos_i);  // put object on tos where GC wants it.
      } else {
        // Load top of stack (do not pop the value off the stack);
        __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
      }
      __ verify_oop(Otos_i);
    }
    // Otos_i: object pointer or NULL if static
    // Rcache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
               Otos_i, Rcache);
    if (!is_static && has_tos) {
      __ pop_ptr(Otos_i);  // restore object pointer
      __ verify_oop(Otos_i);
    }
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  Register Rcache = G3_scratch;
  Register index  = G4_scratch;
  Register Rclass = Rcache;
  Register Roffset= G4_scratch;
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
  jvmti_post_field_access(Rcache, index, is_static, false);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  if (!is_static) {
    pop_and_check_object(Rclass);
  } else {
    __ verify_oop(Rclass);
  }

  Label exit;

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);

  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
  }

  Label checkVolatile;

  // compute field type
  Label notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // Check atos before itos for getstatic, more likely (in Queens at least)
  __ cmp(Rflags, atos);
  __ br(Assembler::notEqual, false, Assembler::pt, notObj);
  __ delayed()->cmp(Rflags, itos);

  // atos
  __ load_heap_oop(Rclass, Roffset, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notObj);

  // cmp(Rflags, itos);
  __ br(Assembler::notEqual, false, Assembler::pt, notInt);
  __ delayed()->cmp(Rflags, ltos);

  // itos
  __ ld(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notInt);

  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed()->cmp(Rflags, btos);

  // ltos
  // load must be atomic
  __ ld_long(Rclass, Roffset, Otos_l);
  __ push(ltos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notLong);

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed()->cmp(Rflags, ztos);

  // btos
  __ ldsb(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notByte);

  // cmp(Rflags, ztos);
  __ br(Assembler::notEqual, false, Assembler::pt, notBool);
  __ delayed()->cmp(Rflags, ctos);

  // ztos
  __ ldsb(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    // use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notBool);

  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed()->cmp(Rflags, stos);

  // ctos
  __ lduh(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notChar);

  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed()->cmp(Rflags, ftos);

  // stos
  __ ldsh(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notShort);


  // cmp(Rflags, ftos);
  __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
  __ delayed()->tst(Lscratch);

  // ftos
  __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
  __ push(ftos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notFloat);


  // dtos
  __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
  __ push(dtos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
  }

  __ bind(checkVolatile);
  if (__ membar_has_effect(membar_bits)) {
    // __ tst(Lscratch); executed in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
  }

  __ bind(exit);
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}
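
// A note on the branch chain above (not in the original source): each
// test's delay slot already issues the *next* compare, so the chain costs
// one instruction per type tried, e.g.
//   cmp(Rflags, atos);              // current test
//   br(notEqual, false, pt, notObj);
//   delayed()->cmp(Rflags, itos);   // next test, executed either way
// That is why each "bind(notX)" is followed by a "// cmp(Rflags, Y);"
// comment: it records the compare that is already pending at that point.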

void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);
  Register Rcache  = G3_scratch;
  Register index   = G4_scratch;
  Register Roffset = G4_scratch;
  Register Rflags  = Rcache;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  __ get_cache_and_index_at_bcp(Rcache, index, 1);
  jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);

  __ null_check(Otos_i);
  __ verify_oop(Otos_i);

  Label exit;

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
  }

  switch (bytecode()) {
  case Bytecodes::_fast_bgetfield:
    __ ldsb(Otos_i, Roffset, Otos_i);
    break;
  case Bytecodes::_fast_cgetfield:
    __ lduh(Otos_i, Roffset, Otos_i);
    break;
  case Bytecodes::_fast_sgetfield:
    __ ldsh(Otos_i, Roffset, Otos_i);
    break;
  case Bytecodes::_fast_igetfield:
    __ ld(Otos_i, Roffset, Otos_i);
    break;
  case Bytecodes::_fast_lgetfield:
    __ ld_long(Otos_i, Roffset, Otos_l);
    break;
  case Bytecodes::_fast_fgetfield:
    __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
    break;
  case Bytecodes::_fast_dgetfield:
    __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
    break;
  case Bytecodes::_fast_agetfield:
    __ load_heap_oop(Otos_i, Roffset, Otos_i);
    break;
  default:
    ShouldNotReachHere();
  }

  if (__ membar_has_effect(membar_bits)) {
    __ btst(Lscratch, Rflags);
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
    __ bind(exit);
  }

  if (state == atos) {
    __ verify_oop(Otos_i);    // does not blow flags!
  }
}
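
// How the jvmti paths below pass the new field value (an illustration of
// the trick, not code from elsewhere in the VM): the value stays pushed on
// the expression stack, whose slot layout matches a jvalue, and only its
// address is handed to the VM:
//   push(tos value);                          // value now at Lesp+wordSize
//   jvalue* jv = (jvalue*)(Lesp + wordSize);  // reinterpret the slot
//   InterpreterRuntime::post_field_modification(obj, cache_entry, jv);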

void TemplateTable::jvmti_post_fast_field_mod() {
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label done;
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G4_scratch);
    __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
    __ pop_ptr(G4_scratch);      // copy the object pointer from tos
    __ verify_oop(G4_scratch);
    __ push_ptr(G4_scratch);     // put the object pointer back on tos
    __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
    // Save tos values before call_VM() clobbers them. Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) {  // save tos values before call_VM() clobbers them
    case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
    // get words in right order for use as jvalue object
    case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
    default: break;
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G3_scratch);
    __ inc(G3_scratch, wordSize);
    // G4_scratch:  object pointer
    // G1_scratch:  cache entry pointer
    // G3_scratch:  jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
    switch (bytecode()) {  // restore tos values
    case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
    case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
    default: break;
    }
    __ bind(done);
  }
}

// The registers Rcache and index are expected to be set before call.
// The function may destroy various registers, just not the Rcache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G1_scratch);
    __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);

    // The Rcache and index registers have already been set.  This would
    // allow us to eliminate this call, but then the Rcache and index
    // registers would have to be used consistently below.
    __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);

    __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
    if (is_static) {
      // Life is simple.  Null out the object pointer.
      __ clr(G4_scratch);
    } else {
      Register Rflags = G1_scratch;
      // Life is harder.  The stack holds the value on top, followed by the
      // object.  We don't know the size of the value, though; it could be
      // one or two words depending on its type.  As a result, we must find
      // the type to determine where the object is.
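      //
      // A sketch of the expression stack at this point (slot indices as
      // used by Interpreter::expr_offset_in_bytes; illustration only):
      //   expr(0)            value, first word    <- top of stack
      //   expr(1)            value, second word   (longs/doubles only)
      //   expr(1) or expr(2) objectref            <- what we are after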

      Label two_word, valsizeknown;
      __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
      __ mov(Lesp, G4_scratch);
      __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
      // Make sure we don't need to mask Rflags after the above shift
      ConstantPoolCacheEntry::verify_tos_state_shift();
      __ cmp(Rflags, ltos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->cmp(Rflags, dtos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->nop();
      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
      __ ba_short(valsizeknown);
      __ bind(two_word);

      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));

      __ bind(valsizeknown);
      // setup object pointer
      __ ld_ptr(G4_scratch, 0, G4_scratch);
      __ verify_oop(G4_scratch);
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G1_scratch);
    __ inc(G1_scratch, wordSize);
    // G4_scratch:  object pointer or NULL if static
    // G3_scratch:  cache entry pointer
    // G1_scratch:  jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
               G4_scratch, G3_scratch, G1_scratch);
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}

void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);
  Register Rcache = G3_scratch;
  Register index  = G4_scratch;
  Register Rclass = Rcache;
  Register Roffset= G4_scratch;
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
  jvmti_post_field_mod(Rcache, index, is_static);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);

    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // compute field type
  Label notInt, notShort, notChar, notObj, notByte, notBool, notLong, notFloat;

  if (is_static) {
    // putstatic with object type most likely, check that first
    __ cmp(Rflags, atos);
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed()->cmp(Rflags, itos);

    // atos
    {
      __ pop_ptr();
      __ verify_oop(Otos_i);
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notObj);
    // cmp(Rflags, itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, btos);

    // itos
    {
      __ pop_i();
      __ st(Otos_i, Rclass, Roffset);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
  } else {
    // putfield with int type most likely, check that first
    __ cmp(Rflags, itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, atos);

    // itos
    {
      __ pop_i();
      pop_and_check_object(Rclass);
      __ st(Otos_i, Rclass, Roffset);
      if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
    // cmp(Rflags, atos);
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed()->cmp(Rflags, btos);

    // atos
    {
      __ pop_ptr();
      pop_and_check_object(Rclass);
      __ verify_oop(Otos_i);
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
      if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notObj);
  }

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed()->cmp(Rflags, ztos);

  // btos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notByte);

  // cmp(Rflags, ztos);
  __ br(Assembler::notEqual, false, Assembler::pt, notBool);
  __ delayed()->cmp(Rflags, ltos);

  // ztos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ and3(Otos_i, 1, Otos_i);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notBool);
  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed()->cmp(Rflags, ctos);

  // ltos
  {
    __ pop_l();
    if (!is_static) pop_and_check_object(Rclass);
    __ st_long(Otos_l, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notLong);
  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed()->cmp(Rflags, stos);

  // ctos (char)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notChar);
  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed()->cmp(Rflags, ftos);

  // stos (short)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notShort);
  // cmp(Rflags, ftos);
  __ br(Assembler::notZero, false, Assembler::pt, notFloat);
  __ delayed()->nop();

  // ftos
  {
    __ pop_f();
    if (!is_static) pop_and_check_object(Rclass);
    __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notFloat);

  // dtos
  {
    __ pop_d();
    if (!is_static) pop_and_check_object(Rclass);
    __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
    }
  }

  __ bind(checkVolatile);
  __ tst(Lscratch);

  if (__ membar_has_effect(write_bits)) {
    // __ tst(Lscratch); in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);
  Register Rcache = G3_scratch;
  Register Rclass = Rcache;
  Register Roffset= G4_scratch;
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  jvmti_post_fast_field_mod();

  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  pop_and_check_object(Rclass);

  switch (bytecode()) {
  case Bytecodes::_fast_zputfield: __ and3(Otos_i, 1, Otos_i);  // fall through to bputfield
  case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
  case Bytecodes::_fast_cputfield: /* fall through */
  case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
  case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
  case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
  case Bytecodes::_fast_fputfield:
    __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
    break;
  case Bytecodes::_fast_dputfield:
    __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
    break;
  case Bytecodes::_fast_aputfield:
    do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
    break;
  default:
    ShouldNotReachHere();
  }

  if (__ membar_has_effect(write_bits)) {
    __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);
  Register Rcache = G3_scratch;
  Register Roffset = G4_scratch;
  Register Rflags = G4_scratch;
  Register Rreceiver = Lscratch;

  __ ld_ptr(Llocals, 0, Rreceiver);

  // access constant pool cache (is resolved)
  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
  __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
  __ add(Lbcp, 1, Lbcp);  // needed to report exception at the correct bcp

  __ verify_oop(Rreceiver);
  __ null_check(Rreceiver);
  if (state == atos) {
    __ load_heap_oop(Rreceiver, Roffset, Otos_i);
  } else if (state == itos) {
    __ ld(Rreceiver, Roffset, Otos_i);
  } else if (state == ftos) {
    __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
  } else {
    ShouldNotReachHere();
  }

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  if (__ membar_has_effect(membar_bits)) {

    // Get is_volatile value in Rflags and check if membar is needed
    __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);

    // Test volatile
    Label notVolatile;
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ btst(Rflags, Lscratch);
    __ br(Assembler::zero, false, Assembler::pt, notVolatile);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
    __ bind(notVolatile);
  }

  __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  __ sub(Lbcp, 1, Lbcp);
}
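
// A note on fast_xaccess above (not in the original source): it implements
// the fused "aload_0; getfield #k" pairs (_fast_iaccess_0 and friends).
// The receiver comes straight from local 0, and the cp-cache index of the
// embedded getfield sits at bcp+2, hence get_cache_and_index_at_bcp(..., 2).
// Lbcp is bumped by one around the access so a null receiver is reported
// against the getfield, not the aload_0.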

//----------------------------------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}

void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register ra,      // return address
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (recv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(recv  == noreg || recv  == O0, "");
  assert(flags == noreg || flags == O1, "");

  // setup registers & access constant pool cache
  if (recv  == noreg)  recv  = O0;
  if (flags == noreg)  flags = O1;
  const Register temp = O2;
  assert_different_registers(method, ra, index, recv, flags, temp);

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // maybe push appendix to arguments
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
    __ btst(flags, temp);
    __ br(Assembler::zero, false, Assembler::pt, L_no_push);
    __ delayed()->nop();
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
    __ load_resolved_reference_at_index(temp, index);
    __ verify_oop(temp);
    __ push_ptr(temp);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (after appendix is pushed so parameter size is correct)
  if (load_receiver) {
    __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp);  // get parameter size
    __ load_receiver(temp, recv);  //  __ argument_address uses Gargs but we need Lesp
    __ verify_oop(recv);
  }

  // compute return type
  __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    AddressLiteral table(table_addr);
    __ set(table, temp);
    __ sll(ra, LogBytesPerWord, ra);
    __ ld_ptr(Address(temp, ra), ra);
  }
}
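
// The return-address computation at the end of prepare_invoke, written out
// in C-ish form (an illustration only):
//   address* table = Interpreter::invoke_return_entry_table_for(code);
//   ra = table[flags >> tos_state_shift];
// i.e. the return entry is selected by the callee's result tos state, so
// the dispatch code that runs after the call re-pushes a result of the
// right size before continuing with the next bytecode.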


void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
  Register Rtemp = G4_scratch;
  Register Rcall = Rindex;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  // get target Method* & entry point
  __ lookup_virtual_method(Rrecv, Rindex, G5_method);
  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ profile_called_method(G5_method, Rtemp);
  __ call_from_interpreter(Rcall, Gargs, Rret);
}

void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp    = G4_scratch;
  Register Rret     = Lscratch;
  Register O0_recv  = O0;
  Label notFinal;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
  __ btst(Rret, G4_scratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->and3(Rret, 0xFF, G4_scratch);  // gets number of parameters

  if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) {
    patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
  }

  invokevfinal_helper(Rscratch, Rret);

  __ bind(notFinal);

  __ mov(G5_method, Rscratch);  // better scratch register
  __ load_receiver(G4_scratch, O0_recv);  // gets receiverOop
  // receiver is in O0_recv
  __ verify_oop(O0_recv);

  // get return address
  AddressLiteral table(Interpreter::invoke_return_entry_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address

  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O0_recv);
  __ verify_klass_ptr(O0_recv);

  __ profile_virtual_call(O0_recv, O4);

  generate_vtable_call(O0_recv, Rscratch, Rret);
}
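
// What the vtable dispatch above boils down to, in C-ish form (a sketch;
// the authoritative lowering is in MacroAssembler::lookup_virtual_method,
// and "vtable_slot" below is an illustrative name, not a real accessor):
//   Klass*  k = receiver->klass();
//   Method* m = k->vtable_slot(index);   // load from k's embedded vtable
//   call m->from_interpreted_entry();
// The is_vfinal fast path skips all of this: f2 then holds the Method*
// itself, since a final method needs no per-receiver lookup.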

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
                             /*is_invokevfinal*/true, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
  invokevfinal_helper(G3_scratch, Lscratch);
}

void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
  Register Rtemp = G4_scratch;

  // Load receiver from stack slot
  __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
  __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
  __ load_receiver(G4_scratch, O0);

  // receiver NULL check
  __ null_check(O0);

  __ profile_final_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);

  // get return address
  AddressLiteral table(Interpreter::invoke_return_entry_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address


  // do the call
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv);  // get receiver also for null check
  __ null_check(O0_recv);

  // do the call
  __ profile_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret);  // get f1 Method*

  // do the call
  __ profile_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}

void TemplateTable::invokeinterface_object_method(Register RKlass,
                                                  Register Rcall,
                                                  Register Rret,
                                                  Register Rflags) {
  Register Rscratch = G4_scratch;
  Register Rindex   = Lscratch;

  assert_different_registers(Rscratch, Rindex, Rret);

  Label notFinal;

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
  __ btst(Rflags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->nop();

  __ profile_final_call(O4);

  // do the call - the index (f2) contains the Method*
  assert_different_registers(G5_method, Gargs, Rcall);
  __ mov(Rindex, G5_method);
  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ call_from_interpreter(Rcall, Gargs, Rret);
  __ bind(notFinal);

  __ profile_virtual_call(RKlass, O4);
  generate_vtable_call(RKlass, Rindex, Rret);
}


void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rinterface = G1_scratch;
  const Register Rmethod    = Lscratch;
  const Register Rret       = G3_scratch;
  const Register O0_recv    = O0;
  const Register O1_flags   = O1;
  const Register O2_Klass   = O2;
  const Register Rscratch   = G4_scratch;
  assert_different_registers(Rscratch, G5_method);

  prepare_invoke(byte_no, Rinterface, Rret, Rmethod, O0_recv, O1_flags);

  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O2_Klass);

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCacheOop.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant java compiler.
  Label notMethod;
  __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
  __ btst(O1_flags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notMethod);
  __ delayed()->nop();

  invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);

  __ bind(notMethod);

  Register Rtemp = O1_flags;

  Label L_no_such_interface;

  // Receiver subtype check against REFC.
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             O2_Klass, Rinterface, noreg,
                             // outputs: temp reg1, temp reg2, temp reg3
                             G5_method, Rscratch, Rtemp,
                             L_no_such_interface,
                             /*return_method=*/false);

  __ profile_virtual_call(O2_Klass, O4);

  //
  // find entry point to call
  //

  // Get declaring interface class from method
  __ ld_ptr(Rmethod, Method::const_offset(), Rinterface);
  __ ld_ptr(Rinterface, ConstMethod::constants_offset(), Rinterface);
  __ ld_ptr(Rinterface, ConstantPool::pool_holder_offset_in_bytes(), Rinterface);

  // Get itable index from method
  const Register Rindex = G5_method;
  __ ld(Rmethod, Method::itable_index_offset(), Rindex);
  __ sub(Rindex, Method::itable_index_max, Rindex);
  __ neg(Rindex);

  // Preserve O2_Klass for throw_AbstractMethodErrorVerbose
  __ mov(O2_Klass, O4);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             O4, Rinterface, Rindex,
                             // outputs: method, scan temp reg, temp reg
                             G5_method, Rscratch, Rtemp,
                             L_no_such_interface);

  // Check for abstract method error.
  {
    Label ok;
    __ br_notnull_short(G5_method, Assembler::pt, ok);
    // Pass arguments for generating a verbose error message.
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
            O2_Klass, Rmethod);
    __ should_not_reach_here();
    __ bind(ok);
  }

  Register Rcall = Rinterface;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ profile_called_method(G5_method, Rscratch);
  __ call_from_interpreter(Rcall, Gargs, Rret);

  __ bind(L_no_such_interface);
  // Pass arguments for generating a verbose error message.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
          O2_Klass, Rinterface);
  __ should_not_reach_here();
}
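
// A note on the itable index computation above (not in the original
// source): the index stored in the Method* is biased, as
// itable_index_max - index, so the real index is recovered with
//   index = itable_index_max - stored_value;
// which is exactly what the sub/neg pair computes:
//   sub(Rindex, itable_index_max, Rindex);   // stored - max
//   neg(Rindex);                             // max - stored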

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register G4_mtype = G4_scratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
  __ null_check(O0_recv);

  // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
  // G5: MH.invokeExact_MT method (from f2)

  // Note:  G4_mtype is already pushed (if necessary) by prepare_invoke

  // do the call
  __ verify_oop(G4_mtype);
  __ profile_final_call(O4);  // FIXME: profile the LambdaForm also
  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret        = Lscratch;
  const Register G4_callsite = G4_scratch;
  const Register Rscratch    = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_callsite);

  // G4: CallSite object (from cpool->resolved_references[f1])
  // G5: MH.linkToCallSite method (from f2)

  // Note:  G4_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(O4);

  // do the call
  __ verify_oop(G4_callsite);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


//----------------------------------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);

  Label slow_case;
  Label done;
  Label initialize_header;
  Label initialize_object;  // including clearing the fields

  Register RallocatedObject = Otos_i;
  Register RinstanceKlass = O1;
  Register Roffset = O3;
  Register Rscratch = O4;

  __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(Rscratch, G3_scratch);
  // make sure the class we're about to instantiate has been resolved
  // This is done before loading InstanceKlass to be consistent with the order
  // how Constant Pool is updated (see ConstantPool::klass_at_put)
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
  // get InstanceKlass
  __ load_resolved_klass_at_offset(Rscratch, Roffset, RinstanceKlass);

  // make sure klass is fully initialized:
  __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
  __ cmp(G3_scratch, InstanceKlass::fully_initialized);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // get instance_size in InstanceKlass (already aligned)
  //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // make sure klass does not have a finalizer, and is not abstract,
  // an interface, or java/lang/Class
  __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
  __ br(Assembler::notZero, false, Assembler::pn, slow_case);
  __ delayed()->nop();

  // Allocate the instance:
  //  If TLAB is enabled:
  //    Try to allocate in the TLAB.
  //    If fails, go to the slow path.
  //  Else If inline contiguous allocations are enabled:
  //    Try to allocate in eden.
  //    If fails due to heap end, go to slow path.
  //
  //  If TLAB is enabled OR inline contiguous is enabled:
  //    Initialize the allocation.
  //    Exit.
  //
  //  Go to slow path.

  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc();

  if (UseTLAB) {
    Register RoldTopValue = RallocatedObject;
    Register RtlabWasteLimitValue = G3_scratch;
    Register RnewTopValue = G1_scratch;
    Register RendValue = Rscratch;
    Register RfreeValue = RnewTopValue;

    // check if we can allocate in the TLAB
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue);  // sets up RallocatedObject
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
    __ add(RoldTopValue, Roffset, RnewTopValue);

    // if there is enough space, we do not CAS and do not clear
    __ cmp(RnewTopValue, RendValue);
    if (ZeroTLAB) {
      // the fields have already been cleared
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
    } else {
      // initialize both the header and fields
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
    }
    __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));

    // Allocation does not fit in the TLAB.
    __ ba_short(slow_case);
  } else {
    // Allocation in the shared Eden
    if (allow_shared_alloc) {
      Register RoldTopValue = G1_scratch;
      Register RtopAddr = G3_scratch;
      Register RnewTopValue = RallocatedObject;
      Register RendValue = Rscratch;

      __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);

      Label retry;
      __ bind(retry);
      __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
      __ ld_ptr(RendValue, 0, RendValue);
      __ ld_ptr(RtopAddr, 0, RoldTopValue);
      __ add(RoldTopValue, Roffset, RnewTopValue);

      // RnewTopValue contains the top address after the new object
      // has been allocated.
      __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);

      __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);

      // if someone beat us on the allocation, try again, otherwise continue
      __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);

      // bump total bytes allocated by this thread
      // RoldTopValue and RtopAddr are dead, so can use G1 and G3
      __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
    }
  }
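
  // The two fast paths above, in C-ish form (an illustration, not VM code):
  //   TLAB:  new_top = tlab.top + size;
  //          if (new_top <= tlab.end) { obj = tlab.top; tlab.top = new_top; }
  //          else goto slow_case;
  //   eden:  the same bump on the global top, but with a CAS, retried if
  //          another thread wins the race.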

  // If UseTLAB or allow_shared_alloc is true, the object has been created
  // above and still needs to be initialized.  Otherwise, skip and go to the
  // slow path.
  if (UseTLAB || allow_shared_alloc) {
    // clear object fields
    __ bind(initialize_object);
    __ deccc(Roffset, sizeof(oopDesc));
    __ br(Assembler::zero, false, Assembler::pt, initialize_header);
    __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);

    // initialize remaining object fields
    if (UseBlockZeroing) {
      // Use BIS for zeroing
      __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
    } else {
      Label loop;
      __ subcc(Roffset, wordSize, Roffset);
      __ bind(loop);
      //__ subcc(Roffset, wordSize, Roffset);      // executed above loop or in delay slot
      __ st_ptr(G0, G3_scratch, Roffset);
      __ br(Assembler::notEqual, false, Assembler::pt, loop);
      __ delayed()->subcc(Roffset, wordSize, Roffset);
    }
    __ ba_short(initialize_header);
  }

  // slow case
  __ bind(slow_case);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
  __ get_constant_pool(O1);

  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);

  __ ba_short(done);

  // Initialize the header: mark, klass
  __ bind(initialize_header);

  if (UseBiasedLocking) {
    __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
  } else {
    __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
  }
  __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes());       // mark
  __ store_klass_gap(G0, RallocatedObject);         // klass gap if compressed
  __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)

  {
    SkipIfEqual skip_if(
      _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
    // Trigger dtrace event
    __ push(atos);
    __ call_VM_leaf(noreg,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
    __ pop(atos);
  }

  // continue
  __ bind(done);
}
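
// What _new's initialize_header step amounts to, in C-ish form (a sketch
// for orientation only):
//   obj->mark()  = UseBiasedLocking ? klass->prototype_header()
//                                   : markOopDesc::prototype();
//   obj->klass() = klass;   // stored last so a concurrent collector never
//                           // sees a klass on an uninitialized object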
void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, quicked, cast_ok, resolved;
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for casting a NULL
  __ br_null(Otos_i, false, Assembler::pn, is_null);
  __ delayed()->nop();

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the checkcast has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);

  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to cast_ok if no
  // failure. Throw an exception on failure.
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );

  __ bind(cast_ok);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}


void TemplateTable::instanceof() {
  Label done, is_null, quicked, resolved;
  transition(atos, itos);
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for a NULL object
  __ br_null(Otos_i, false, Assembler::pt, is_null);
  __ delayed()->nop();

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the class has already been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ get_constant_pool(Lscratch);
  __ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);

  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to done if no
  // failure. Return 0 on failure.
  __ or3(G0, 1, Otos_i);      // set result assuming quick tests succeed
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
  // Not a subtype; return 0;
  __ clr( Otos_i );

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}
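
// A minimal sketch of what the breakpoint template below asks the runtime to
// do (illustrative pseudocode; the real entry points also take the current
// thread):
//
//   Bytecodes::Code orig = get_original_bytecode_at(method, bcp); // Lmethod, Lbcp
//   post_breakpoint_event(method, bcp);                           // JVMTI event
//   execute(orig);                                                // resume with the
//                                                                 // original bytecode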
void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);
  // get the unpatched byte code
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
  __ mov(O0, Lbyte_code);

  // post the breakpoint event
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);

  // complete the execution of the original bytecode
  __ dispatch_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // This works because the exception is cached in Otos_i, which is the same
  // as O0, which is what throw_exception_entry expects.
  assert(Otos_i == Oexception, "see explanation above");

  __ verify_oop(Otos_i);
  __ null_check(Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
}


//----------------------------------------------------------------------------------------------------
// Synchronization


// See frame_sparc.hpp for monitor block layout.
// Monitor elements are dynamically allocated by growing the stack as needed.

void TemplateTable::monitorenter() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  // Try to acquire a lock on the object;
  // repeat until succeeded (i.e., until
  // monitorenter returns true).

  { Label ok;
    __ tst(Otos_i);
    __ throw_if_not_1_x( Assembler::notZero, ok);
    __ delayed()->mov(Otos_i, Lscratch); // save obj
    __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  }

  assert(O0 == Otos_i, "Be sure where the object to lock is");

  // find a free slot in the monitor block

  // initialize entry pointer
  __ clr(O1); // points to free slot or NULL

  {
    Label entry, loop, exit;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba( entry );
    __ delayed()->mov( Lmonitors, O3 ); // first one to check

    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ tst(O4); // is this entry unused?
    __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);

    __ cmp(O4, O0); // check if current entry is for same object
    __ brx( Assembler::equal, false, Assembler::pn, exit );
    __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one

    __ bind( entry );

    __ cmp( O3, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);

    __ bind( exit );
  }

  { Label allocated;

    // found free slot?
    __ br_notnull_short(O1, Assembler::pn, allocated);

    __ add_monitor_to_stack( false, O2, O3 );
    __ mov(Lmonitors, O1);

    __ bind(allocated);
  }
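
  // The scan above is, in effect (illustrative pseudocode; names mirror the
  // registers used):
  //
  //   BasicObjectLock* free_slot = NULL;                 // O1
  //   for (BasicObjectLock* m = Lmonitors;               // O3 walks the block
  //        m <= top_most_monitor; m++) {
  //     if (m->obj() == NULL) free_slot = m;             // remember unused entry
  //     if (m->obj() == lock_obj) break;                 // entry for same object
  //   }
  //   if (free_slot == NULL)
  //     free_slot = add_monitor_to_stack();              // grow the monitor block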
  // Increment bcp to point to the next bytecode, so exception handling for
  // asynchronous exceptions works correctly.
  // The object has already been popped from the stack, so the expression
  // stack looks correct.
  __ inc(Lbcp);

  __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
  __ lock_object(O1, O0);

  // check if there's enough space on the stack for the monitors after locking
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );

  assert(O0 == Otos_i, "just checking");

  { Label entry, loop, found;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba(entry);
    // Use Lscratch to hold the monitor element to check; start with the most
    // recent monitor. By using a local register it survives the call to the C routine.
    __ delayed()->mov( Lmonitors, Lscratch );

    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ cmp(O4, O0); // check if current entry is for desired object
    __ brx( Assembler::equal, true, Assembler::pt, found );
    __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit

    __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next

    __ bind( entry );

    __ cmp( Lscratch, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    __ should_not_reach_here();

    __ bind(found);
  }
  __ unlock_object(O1);
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldub(Lbcp, 1, G3_scratch); // get next bc
  __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
  AddressLiteral ep(Interpreter::_wentry_point);
  __ set(ep, G4_scratch);
  __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
  __ jmp(G3_scratch, G0);
  __ delayed()->nop();
  // Note: the Lbcp increment step is part of the individual wide bytecode implementations
}


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  // put ndims * wordSize into Lscratch
  __ ldub( Lbcp, 3, Lscratch);
  __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
  // Lesp points past the last_dim, so set O1 to the first_dim address
  __ add( Lesp, Lscratch, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
  __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
}
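
// For reference, the operand handling above amounts to (illustrative
// pseudocode mirroring the two adds; Lesp points just past last_dim):
//
//   int ndims = bcp[3];                        // dimension-count operand
//   intptr_t* first_dim = Lesp + ndims;        // address passed to the runtime
//   oop result = InterpreterRuntime::multianewarray(thread, first_dim);
//   Lesp = Lesp + ndims;                       // pop all dimension words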