/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val
// index can be noreg,
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1BarrierSet:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        // G1 barrier needs uncompressed oop for region cross check.
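        // (The G1 post barrier filters out stores where the field and the
        //  new value live in the same heap region, so it must see the raw oop.)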
        Register new_val = val;
        if (UseCompressedOops && val != G0) {
          new_val = tmp;
          __ mov(val, new_val);
        }

        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, new_val, tmp);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableBarrierSet:
      {
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}


//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test Method* flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
      __ set(bc, bc_reg);
      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    if (load_bc_into_bc_reg) {
      __ set(bc, bc_reg);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
  __ set(value, Otos_l);
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch (value) {
  default: ShouldNotReachHere();
  case 0:  p = &zero;  break;
  case 1:  p = &one;   break;
  case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch (value) {
  default: ShouldNotReachHere();
  case 0:  p = &zero;  break;
  case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb(at_bcp(1), Otos_i);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);  // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ stop("string should be rewritten to fast_aldc");
  __ ba_short(exit);

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  assert_different_registers(Otos_i, G3_scratch);
  __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size);  // load index => G3_scratch
  __ load_resolved_reference_at_index(Otos_i, G3_scratch);
  __ tst(Otos_i);
  __ br(Assembler::notEqual, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ call_VM(Otos_i, entry, O1);
  __ bind(resolved);
  __ verify_oop(Otos_i);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, exit;

  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool.  SG, 11/7/97
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
  __ ldx(G3_scratch, base_offset, Otos_l);
  __ push(ltos);

  __ bind(exit);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub(at_bcp(offset), reg);
}

void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  // Rewrite iload,iload pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int(G3_scratch, Otos_i);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long(G3_scratch, Otos_l);
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long(G3_scratch, Otos_l);
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr(G3_scratch, Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld(Llocals, Interpreter::local_offset_in_bytes(n), Otos_i);
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f);
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr(Llocals, Interpreter::local_offset_in_bytes(n), Otos_i);
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // if _getfield then wait with rewrite
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int(G3_scratch, Otos_i);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long(G3_scratch, Otos_l);
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int(G3_scratch, Otos_i);
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long(G3_scratch, Otos_l);
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2);  // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2);  // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2);  // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}


void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);      // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);  // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short(Otos_i, Assembler::pn, is_null);

  __ load_klass(O3, O4);      // get array klass
  __ load_klass(Otos_i, O5);  // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i: value
  // O1: addr - offset
  // O2: index
  // O3: array
  // O4: array element klass
  // O5: value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check.  Branch to store_ok if no
  // failure.  Throw if failure.
  __ gen_subtype_check(O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok);

  // Not a subtype; so must throw exception
  __ throw_if_not_x(Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch);

  // Store is OK.
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O2: index
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
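  // (Klass::layout_helper() encodes the array element type; the "diffbit" is
  //  a single bit that differs between the T_BOOLEAN and T_BYTE encodings.)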
  __ load_klass(O3, G4_scratch);
  __ ld(G4_scratch, in_bytes(Klass::layout_helper_offset()), G4_scratch);
  __ set(Klass::layout_helper_boolean_diffbit(), G3_scratch);
  __ andcc(G3_scratch, G4_scratch, G0);
  Label L_skip;
  __ br(Assembler::zero, false, Assembler::pn, L_skip);
  __ delayed()->nop();
  __ and3(Otos_i, 1, Otos_i);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c (c in reg) (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (longs and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
  case  add: __ add(O1, Otos_i, Otos_i);   break;
  case  sub: __ sub(O1, Otos_i, Otos_i);   break;
    // %%%%% Mul may not exist: better to call .mul?
  case  mul: __ smul(O1, Otos_i, Otos_i);  break;
  case _and: __ and3(O1, Otos_i, Otos_i);  break;
  case  _or: __ or3(O1, Otos_i, Otos_i);   break;
  case _xor: __ xor3(O1, Otos_i, Otos_i);  break;
  case  shl: __ sll(O1, Otos_i, Otos_i);   break;
  case  shr: __ sra(O1, Otos_i, Otos_i);   break;
  case ushr: __ srl(O1, Otos_i, Otos_i);   break;
  default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
  case  add: __ add(O2, Otos_l, Otos_l);   break;
  case  sub: __ sub(O2, Otos_l, Otos_l);   break;
  case _and: __ and3(O2, Otos_l, Otos_l);  break;
  case  _or: __ or3(O2, Otos_l, Otos_l);   break;
  case _xor: __ xor3(O2, Otos_l, Otos_l);  break;
  default: ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  // %%%%% Later: For SPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.
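  //
  // Note: per the JVM spec, idiv must wrap on the one overflow case
  // (Integer.MIN_VALUE / -1 == Integer.MIN_VALUE), so that divisor/dividend
  // pair is special-cased below rather than handed to sdiv.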

  transition(itos, itos);
  __ pop_i(O1);  // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);  // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i);  // note: irem uses O1 after this instruction!
  __ bind(done);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2);  // save divisor
  idiv();              // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
  __ mulx(Otos_l, O2, Otos_l);
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
  __ tst(Otos_l);
  __ throw_if_not_xcc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
  __ tst(Otos_l);
  __ throw_if_not_xcc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx(Otos_l2, Otos_l, Otos_l2);
  __ sub(O2, Otos_l2, Otos_l);
}


void TemplateTable::lshl() {
  transition(itos, ltos);  // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);  // shift value in O2, O3
  __ sllx(O2, Otos_i, Otos_l);
}


void TemplateTable::lshr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
  __ srax(O2, Otos_i, Otos_l);
}


void TemplateTable::lushr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
  __ srlx(O2, Otos_i, Otos_l);
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add: __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
  case sub: __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
  case mul: __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
  case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
  case rem:
    assert(Ftos_f == F0, "just checking");
    // LP64 calling conventions use F1, F3 for passing 2 floats
    __ pop_f(F1);
    __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
    assert(Ftos_f == F0, "fix this code");
    break;

  default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add: __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
  case sub: __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
  case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
  case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
  case rem:
    // Pass arguments in D0, D2
    __ fmov(FloatRegisterImpl::D, Ftos_f, F2);
    __ pop_d(F0);
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
    assert(Ftos_d == F0, "fix this code");
    break;

  default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ sub(G0, Otos_l, Otos_l);
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp(4, O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
  case Bytecodes::_i2l: // fall through
  case Bytecodes::_i2f: // fall through
  case Bytecodes::_i2d: // fall through
  case Bytecodes::_i2b: // fall through
  case Bytecodes::_i2c: // fall through
  case Bytecodes::_i2s: tos_in = itos; break;
  case Bytecodes::_l2i: // fall through
  case Bytecodes::_l2f: // fall through
  case Bytecodes::_l2d: tos_in = ltos; break;
  case Bytecodes::_f2i: // fall through
  case Bytecodes::_f2l: // fall through
  case Bytecodes::_f2d: tos_in = ftos; break;
  case Bytecodes::_d2i: // fall through
  case Bytecodes::_d2l: // fall through
  case Bytecodes::_d2f: tos_in = dtos; break;
  default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
  case Bytecodes::_l2i: // fall through
  case Bytecodes::_f2i: // fall through
  case Bytecodes::_d2i: // fall through
  case Bytecodes::_i2b: // fall through
  case Bytecodes::_i2c: // fall through
  case Bytecodes::_i2s: tos_out = itos; break;
  case Bytecodes::_i2l: // fall through
  case Bytecodes::_f2l: // fall through
  case Bytecodes::_d2l: tos_out = ltos; break;
  case Bytecodes::_i2f: // fall through
  case Bytecodes::_l2f: // fall through
  case Bytecodes::_d2f: tos_out = ftos; break;
  case Bytecodes::_i2d: // fall through
  case Bytecodes::_l2d: // fall through
  case Bytecodes::_f2d: tos_out = dtos; break;
  default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif


  // Conversion
  Label done;
  switch (bytecode()) {
  case Bytecodes::_i2l:
    // Sign extend the 32 bits
    __ sra(Otos_i, 0, Otos_l);
    break;

  case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

  case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

  case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

  case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

  case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

  case Bytecodes::_l2i:
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
    break;

  case Bytecodes::_l2f:
  case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (bytecode() == Bytecodes::_l2f) {
      __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
    } else {
      __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
    }
    break;

  case Bytecodes::_f2i: {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);  // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

  case Bytecodes::_f2l:
    // must uncache tos
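    // (push_f/pop_f moves the tos value out of F0 and into F1, the first
    //  float argument register under the 64-bit calling convention; compare
    //  the note on frem in fop2 above.)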
    __ push_f();
    __ pop_f(F1);
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

  case Bytecodes::_f2d:
    __ ftof(FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

  case Bytecodes::_d2i:
  case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
    // LP64 calling conventions pass first double arg in D0
    __ pop_d(Ftos_d);
    __ call_VM_leaf(Lscratch,
                    bytecode() == Bytecodes::_d2i
                      ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
                      : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

  case Bytecodes::_d2f:
    __ ftof(FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    break;

  default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

  __ pop_l(O1);  // pop off value 1, value 2 is in O0
  __ lcmp(O1, Otos_l, Otos_i);
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");

  __ float_cmp(is_float, unordered_result, F2, F0, Otos_i);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp(1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp(1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos, 0, true);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register l_cur_bcp = Lscratch;
  __ mov(Lbcp, l_cur_bcp);

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    Label Lforward;
    // check branch direction
    __ br(Assembler::positive, false, Assembler::pn, Lforward);
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add(O1_disp, Lbcp, Lbcp);  // add to bc addr

    const Register G3_method_counters = G3_scratch;
    __ get_method_counters(Lmethod, G3_method_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                   (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in MethodCounters*
      __ bind(Lno_mdo);
      Address backedge_counter(G3_method_counters,
                               in_bytes(MethodCounters::backedge_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
      Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                 (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ldub(O0, nmethod::state_offset(), O2);
      __ cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else { // not TieredCompilation
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else {
    // Bump bytecode pointer by displacement (take the branch)
    __ add(O1_disp, Lbcp, Lbcp);  // add to bc addr
  }

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos, 0, true);
}


// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp(Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result.  The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  { Label zzz;
    __ set(65536, G3_scratch);
    __ cmp(Otos_i, G3_scratch);
    __ bp(Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
    __ delayed()->nop();
    __ stop("BCI is in the wrong register half?");
    __ bind(zzz);
  }
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);  // Low Byte
  __ ld(O1, 2 * BytesPerInt, O3);  // High Byte
  // Sign extend the 32 bits
  __ sra(Otos_i, 0, Otos_i);

  // check against lo & hi
  __ cmp(Otos_i, O2);
  __ br(Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp(Otos_i, O3);
  __ br(Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2);  // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2);  // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3);     // set first pair addr
  __ ba(loop_entry);
  __ delayed()->add(O3, O2, O2);       // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4);  // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4);  // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba_short(continue_execution);
  }

  // entry found -> get offset
  __ bind(found);
  if (ProfileInterpreter) {
    __ sub(O3, O1, O3);
    __ sub(O3, 2 * BytesPerInt, O3);
    __ srl(O3, LogBytesPerInt + 1, O3);  // in word-pairs
    __ profile_switch_case(O3, O1, O2, G3_scratch);

    __ bind(continue_execution);
  }
  __ add(Lbcp, O4, Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (inexisting)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  assert(Otos_i == O0, "alias checking");
  const Register Rkey     = Otos_i;  // already set (tosca)
  const Register Rarray   = O1;
  const Register Ri       = O2;
  const Register Rj       = O3;
  const Register Rh       = O4;
  const Register Rscratch = O5;

  const int log_entry_size = 3;
  const int entry_size = 1 << log_entry_size;

  Label found;
  // Find Array start
  __ add(Lbcp, 3 * BytesPerInt, Rarray);
  __ and3(Rarray, -BytesPerInt, Rarray);
  // initialize i & j (in delay slot)
  __ clr(Ri);

  // and start
  Label entry;
  __ ba(entry);
  __ delayed()->ld(Rarray, -BytesPerInt, Rj);
  // (Rj is already in the native byte-ordering.)
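  // (Note: the delay slot of the loop-entry branch below computes Rh = i + j;
  //  the shift at the top of the loop completes the >> 1.)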
1838 1839 // binary search loop 1840 { Label loop; 1841 __ bind( loop ); 1842 // int h = (i + j) >> 1; 1843 __ sra( Rh, 1, Rh ); 1844 // if (key < array[h].fast_match()) { 1845 // j = h; 1846 // } else { 1847 // i = h; 1848 // } 1849 __ sll( Rh, log_entry_size, Rscratch ); 1850 __ ld( Rarray, Rscratch, Rscratch ); 1851 // (Rscratch is already in the native byte-ordering.) 1852 __ cmp( Rkey, Rscratch ); 1853 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match()) 1854 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match()) 1855 1856 // while (i+1 < j) 1857 __ bind( entry ); 1858 __ add( Ri, 1, Rscratch ); 1859 __ cmp(Rscratch, Rj); 1860 __ br( Assembler::less, true, Assembler::pt, loop ); 1861 __ delayed()->add( Ri, Rj, Rh ); // start h = i + j >> 1; 1862 } 1863 1864 // end of binary search, result index is i (must check again!) 1865 Label default_case; 1866 Label continue_execution; 1867 if (ProfileInterpreter) { 1868 __ mov( Ri, Rh ); // Save index in i for profiling 1869 } 1870 __ sll( Ri, log_entry_size, Ri ); 1871 __ ld( Rarray, Ri, Rscratch ); 1872 // (Rscratch is already in the native byte-ordering.) 1873 __ cmp( Rkey, Rscratch ); 1874 __ br( Assembler::notEqual, true, Assembler::pn, default_case ); 1875 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j 1876 1877 // entry found -> j = offset 1878 __ inc( Ri, BytesPerInt ); 1879 __ profile_switch_case(Rh, Rj, Rscratch, Rkey); 1880 __ ld( Rarray, Ri, Rj ); 1881 // (Rj is already in the native byte-ordering.) 1882 1883 if (ProfileInterpreter) { 1884 __ ba_short(continue_execution); 1885 } 1886 1887 __ bind(default_case); // fall through (if not profiling) 1888 __ profile_switch_default(Ri); 1889 1890 __ bind(continue_execution); 1891 __ add( Lbcp, Rj, Lbcp ); 1892 __ dispatch_next(vtos, 0, true); 1893 } 1894 1895 1896 void TemplateTable::_return(TosState state) { 1897 transition(state, state); 1898 assert(_desc->calls_vm(), "inconsistent calls_vm information"); 1899 1900 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { 1901 assert(state == vtos, "only valid state"); 1902 __ mov(G0, G3_scratch); 1903 __ access_local_ptr(G3_scratch, Otos_i); 1904 __ load_klass(Otos_i, O2); 1905 __ set(JVM_ACC_HAS_FINALIZER, G3); 1906 __ ld(O2, in_bytes(Klass::access_flags_offset()), O2); 1907 __ andcc(G3, O2, G0); 1908 Label skip_register_finalizer; 1909 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer); 1910 __ delayed()->nop(); 1911 1912 // Call out to do finalizer registration 1913 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i); 1914 1915 __ bind(skip_register_finalizer); 1916 } 1917 1918 if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) { 1919 Label no_safepoint; 1920 __ ldx(Address(G2_thread, Thread::polling_page_offset()), G3_scratch, 0); 1921 __ btst(SafepointMechanism::poll_bit(), G3_scratch); 1922 __ br(Assembler::zero, false, Assembler::pt, no_safepoint); 1923 __ delayed()->nop(); 1924 __ push(state); 1925 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)); 1926 __ pop(state); 1927 __ bind(no_safepoint); 1928 } 1929 1930 // Narrow result if state is itos but result type is smaller. 1931 // Need to narrow in the return bytecode rather than in generate_return_entry 1932 // since compiled code callers expect the result to already be narrowed. 
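// For example (a sketch): a method declared to return boolean or byte
// computes its result in a full 32-bit register; narrow() re-truncates
// Otos_i according to the declared return type (mask to the low bit for
// boolean, sign-extend for byte/short, zero-extend for char) so that a
// compiled caller never observes stray high bits.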
1933 if (state == itos) { 1934 __ narrow(Otos_i); 1935 } 1936 __ remove_activation(state, /* throw_monitor_exception */ true); 1937 1938 // The caller's SP was adjusted upon method entry to accommodate 1939 // the callee's non-argument locals. Undo that adjustment. 1940 __ ret(); // return to caller 1941 __ delayed()->restore(I5_savedSP, G0, SP); 1942 } 1943 1944 1945 // ---------------------------------------------------------------------------- 1946 // Volatile variables demand their effects be made known to all CPUs in 1947 // order. Store buffers on most chips allow reads & writes to reorder; the 1948 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of 1949 // memory barrier (i.e., it's not sufficient that the interpreter does not 1950 // reorder volatile references, the hardware also must not reorder them). 1951 // 1952 // According to the new Java Memory Model (JMM): 1953 // (1) All volatiles are serialized wrt each other. 1954 // ALSO reads & writes act as acquire & release, so: 1955 // (2) A read cannot let unrelated NON-volatile memory refs that happen after 1956 // the read float up to before the read. It's OK for non-volatile memory refs 1957 // that happen before the volatile read to float down below it. 1958 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs 1959 // that happen BEFORE the write float down to after the write. It's OK for 1960 // non-volatile memory refs that happen after the volatile write to float up 1961 // before it. 1962 // 1963 // We only put in barriers around volatile refs (they are expensive), not 1964 // _between_ memory refs (that would require us to track the flavor of the 1965 // previous memory refs). Requirements (2) and (3) require some barriers 1966 // before volatile stores and after volatile loads. These nearly cover 1967 // requirement (1) but miss the volatile-store-volatile-load case. This final 1968 // case is placed after volatile-stores although it could just as well go 1969 // before volatile-loads. 1970 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) { 1971 // Helper function to insert an is-volatile test and memory barrier 1972 // All current sparc implementations run in TSO, needing only StoreLoad 1973 if ((order_constraint & Assembler::StoreLoad) == 0) return; 1974 __ membar( order_constraint ); 1975 } 1976 1977 // ---------------------------------------------------------------------------- 1978 void TemplateTable::resolve_cache_and_index(int byte_no, 1979 Register Rcache, 1980 Register index, 1981 size_t index_size) { 1982 // Depends on cpCacheOop layout! 1983 1984 Label resolved; 1985 Bytecodes::Code code = bytecode(); 1986 switch (code) { 1987 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; 1988 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; default: break; 1989 } 1990 1991 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); 1992 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size); 1993 __ cmp(Lbyte_code, code); // have we resolved this bytecode?
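// (A sketch of the idiom here: the branch below carries set(code, O1) in
// its delay slot, pre-loading the argument for resolve_from_cache; when
// the bytecode is already resolved the branch is taken and the value left
// in O1 is simply unused.)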
1994 __ br(Assembler::equal, false, Assembler::pt, resolved); 1995 __ delayed()->set(code, O1); 1996 1997 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache); 1998 // first time invocation - must resolve first 1999 __ call_VM(noreg, entry, O1); 2000 // Update registers with resolved info 2001 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); 2002 __ bind(resolved); 2003 } 2004 2005 void TemplateTable::load_invoke_cp_cache_entry(int byte_no, 2006 Register method, 2007 Register itable_index, 2008 Register flags, 2009 bool is_invokevirtual, 2010 bool is_invokevfinal, 2011 bool is_invokedynamic) { 2012 // Uses both G3_scratch and G4_scratch 2013 Register cache = G3_scratch; 2014 Register index = G4_scratch; 2015 assert_different_registers(cache, method, itable_index); 2016 2017 // determine constant pool cache field offsets 2018 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); 2019 const int method_offset = in_bytes( 2020 ConstantPoolCache::base_offset() + 2021 ((byte_no == f2_byte) 2022 ? ConstantPoolCacheEntry::f2_offset() 2023 : ConstantPoolCacheEntry::f1_offset() 2024 ) 2025 ); 2026 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() + 2027 ConstantPoolCacheEntry::flags_offset()); 2028 // access constant pool cache fields 2029 const int index_offset = in_bytes(ConstantPoolCache::base_offset() + 2030 ConstantPoolCacheEntry::f2_offset()); 2031 2032 if (is_invokevfinal) { 2033 __ get_cache_and_index_at_bcp(cache, index, 1); 2034 __ ld_ptr(Address(cache, method_offset), method); 2035 } else { 2036 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2)); 2037 resolve_cache_and_index(byte_no, cache, index, index_size); 2038 __ ld_ptr(Address(cache, method_offset), method); 2039 } 2040 2041 if (itable_index != noreg) { 2042 // pick up itable or appendix index from f2 also: 2043 __ ld_ptr(Address(cache, index_offset), itable_index); 2044 } 2045 __ ld_ptr(Address(cache, flags_offset), flags); 2046 } 2047 2048 // The Rcache register must be set before the call. 2049 void TemplateTable::load_field_cp_cache_entry(Register Robj, 2050 Register Rcache, 2051 Register index, 2052 Register Roffset, 2053 Register Rflags, 2054 bool is_static) { 2055 assert_different_registers(Rcache, Rflags, Roffset); 2056 2057 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2058 2059 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); 2060 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2061 if (is_static) { 2062 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj); 2063 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 2064 __ ld_ptr( Robj, mirror_offset, Robj); 2065 __ resolve_oop_handle(Robj); 2066 } 2067 } 2068 2069 // The registers Rcache and index are expected to be set before the call. 2070 // Correct values of the Rcache and index registers are preserved. 2071 void TemplateTable::jvmti_post_field_access(Register Rcache, 2072 Register index, 2073 bool is_static, 2074 bool has_tos) { 2075 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2076 2077 if (JvmtiExport::can_post_field_access()) { 2078 // Check to see if a field access watch has been set before we take 2079 // the time to call into the VM.
2080 Label Label1; 2081 assert_different_registers(Rcache, index, G1_scratch); 2082 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr()); 2083 __ load_contents(get_field_access_count_addr, G1_scratch); 2084 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1); 2085 2086 __ add(Rcache, in_bytes(cp_base_offset), Rcache); 2087 2088 if (is_static) { 2089 __ clr(Otos_i); 2090 } else { 2091 if (has_tos) { 2092 // save object pointer before call_VM() clobbers it 2093 __ push_ptr(Otos_i); // put object on tos where GC wants it. 2094 } else { 2095 // Load top of stack (do not pop the value off the stack); 2096 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i); 2097 } 2098 __ verify_oop(Otos_i); 2099 } 2100 // Otos_i: object pointer or NULL if static 2101 // Rcache: cache entry pointer 2102 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), 2103 Otos_i, Rcache); 2104 if (!is_static && has_tos) { 2105 __ pop_ptr(Otos_i); // restore object pointer 2106 __ verify_oop(Otos_i); 2107 } 2108 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2109 __ bind(Label1); 2110 } 2111 } 2112 2113 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 2114 transition(vtos, vtos); 2115 2116 Register Rcache = G3_scratch; 2117 Register index = G4_scratch; 2118 Register Rclass = Rcache; 2119 Register Roffset= G4_scratch; 2120 Register Rflags = G1_scratch; 2121 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2122 2123 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2)); 2124 jvmti_post_field_access(Rcache, index, is_static, false); 2125 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); 2126 2127 if (!is_static) { 2128 pop_and_check_object(Rclass); 2129 } else { 2130 __ verify_oop(Rclass); 2131 } 2132 2133 Label exit; 2134 2135 Assembler::Membar_mask_bits membar_bits = 2136 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2137 2138 if (__ membar_has_effect(membar_bits)) { 2139 // Get volatile flag 2140 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2141 __ and3(Rflags, Lscratch, Lscratch); 2142 } 2143 2144 Label checkVolatile; 2145 2146 // compute field type 2147 Label notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj; 2148 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2149 // Make sure we don't need to mask Rflags after the above shift 2150 ConstantPoolCacheEntry::verify_tos_state_shift(); 2151 2152 // Check atos before itos for getstatic, more likely (in Queens at least) 2153 __ cmp(Rflags, atos); 2154 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2155 __ delayed() ->cmp(Rflags, itos); 2156 2157 // atos 2158 __ load_heap_oop(Rclass, Roffset, Otos_i); 2159 __ verify_oop(Otos_i); 2160 __ push(atos); 2161 if (!is_static && rc == may_rewrite) { 2162 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch); 2163 } 2164 __ ba(checkVolatile); 2165 __ delayed()->tst(Lscratch); 2166 2167 __ bind(notObj); 2168 2169 // cmp(Rflags, itos); 2170 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2171 __ delayed() ->cmp(Rflags, ltos); 2172 2173 // itos 2174 __ ld(Rclass, Roffset, Otos_i); 2175 __ push(itos); 2176 if (!is_static && rc == may_rewrite) { 2177 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch); 2178 } 2179 __ ba(checkVolatile); 2180 __ delayed()->tst(Lscratch); 2181 2182 __ bind(notInt); 2183 2184 // cmp(Rflags, 
ltos); 2185 __ br(Assembler::notEqual, false, Assembler::pt, notLong); 2186 __ delayed() ->cmp(Rflags, btos); 2187 2188 // ltos 2189 // load must be atomic 2190 __ ld_long(Rclass, Roffset, Otos_l); 2191 __ push(ltos); 2192 if (!is_static && rc == may_rewrite) { 2193 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch); 2194 } 2195 __ ba(checkVolatile); 2196 __ delayed()->tst(Lscratch); 2197 2198 __ bind(notLong); 2199 2200 // cmp(Rflags, btos); 2201 __ br(Assembler::notEqual, false, Assembler::pt, notByte); 2202 __ delayed() ->cmp(Rflags, ztos); 2203 2204 // btos 2205 __ ldsb(Rclass, Roffset, Otos_i); 2206 __ push(itos); 2207 if (!is_static && rc == may_rewrite) { 2208 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch); 2209 } 2210 __ ba(checkVolatile); 2211 __ delayed()->tst(Lscratch); 2212 2213 __ bind(notByte); 2214 2215 // cmp(Rflags, ztos); 2216 __ br(Assembler::notEqual, false, Assembler::pt, notBool); 2217 __ delayed() ->cmp(Rflags, ctos); 2218 2219 // ztos 2220 __ ldsb(Rclass, Roffset, Otos_i); 2221 __ push(itos); 2222 if (!is_static && rc == may_rewrite) { 2223 // use btos rewriting, no truncating to t/f bit is needed for getfield. 2224 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch); 2225 } 2226 __ ba(checkVolatile); 2227 __ delayed()->tst(Lscratch); 2228 2229 __ bind(notBool); 2230 2231 // cmp(Rflags, ctos); 2232 __ br(Assembler::notEqual, false, Assembler::pt, notChar); 2233 __ delayed() ->cmp(Rflags, stos); 2234 2235 // ctos 2236 __ lduh(Rclass, Roffset, Otos_i); 2237 __ push(itos); 2238 if (!is_static && rc == may_rewrite) { 2239 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch); 2240 } 2241 __ ba(checkVolatile); 2242 __ delayed()->tst(Lscratch); 2243 2244 __ bind(notChar); 2245 2246 // cmp(Rflags, stos); 2247 __ br(Assembler::notEqual, false, Assembler::pt, notShort); 2248 __ delayed() ->cmp(Rflags, ftos); 2249 2250 // stos 2251 __ ldsh(Rclass, Roffset, Otos_i); 2252 __ push(itos); 2253 if (!is_static && rc == may_rewrite) { 2254 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch); 2255 } 2256 __ ba(checkVolatile); 2257 __ delayed()->tst(Lscratch); 2258 2259 __ bind(notShort); 2260 2261 2262 // cmp(Rflags, ftos); 2263 __ br(Assembler::notEqual, false, Assembler::pt, notFloat); 2264 __ delayed() ->tst(Lscratch); 2265 2266 // ftos 2267 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f); 2268 __ push(ftos); 2269 if (!is_static && rc == may_rewrite) { 2270 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch); 2271 } 2272 __ ba(checkVolatile); 2273 __ delayed()->tst(Lscratch); 2274 2275 __ bind(notFloat); 2276 2277 2278 // dtos 2279 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d); 2280 __ push(dtos); 2281 if (!is_static && rc == may_rewrite) { 2282 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch); 2283 } 2284 2285 __ bind(checkVolatile); 2286 if (__ membar_has_effect(membar_bits)) { 2287 // __ tst(Lscratch); executed in delay slot 2288 __ br(Assembler::zero, false, Assembler::pt, exit); 2289 __ delayed()->nop(); 2290 volatile_barrier(membar_bits); 2291 } 2292 2293 __ bind(exit); 2294 } 2295 2296 void TemplateTable::getfield(int byte_no) { 2297 getfield_or_static(byte_no, false); 2298 } 2299 2300 void TemplateTable::nofast_getfield(int byte_no) { 2301 getfield_or_static(byte_no, false, may_not_rewrite); 2302 } 2303 2304 void TemplateTable::getstatic(int byte_no) { 2305 getfield_or_static(byte_no, true); 2306 } 2307 2308 void 
TemplateTable::fast_accessfield(TosState state) { 2309 transition(atos, state); 2310 Register Rcache = G3_scratch; 2311 Register index = G4_scratch; 2312 Register Roffset = G4_scratch; 2313 Register Rflags = Rcache; 2314 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2315 2316 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2317 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true); 2318 2319 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2320 2321 __ null_check(Otos_i); 2322 __ verify_oop(Otos_i); 2323 2324 Label exit; 2325 2326 Assembler::Membar_mask_bits membar_bits = 2327 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2328 if (__ membar_has_effect(membar_bits)) { 2329 // Get volatile flag 2330 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags); 2331 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2332 } 2333 2334 switch (bytecode()) { 2335 case Bytecodes::_fast_bgetfield: 2336 __ ldsb(Otos_i, Roffset, Otos_i); 2337 break; 2338 case Bytecodes::_fast_cgetfield: 2339 __ lduh(Otos_i, Roffset, Otos_i); 2340 break; 2341 case Bytecodes::_fast_sgetfield: 2342 __ ldsh(Otos_i, Roffset, Otos_i); 2343 break; 2344 case Bytecodes::_fast_igetfield: 2345 __ ld(Otos_i, Roffset, Otos_i); 2346 break; 2347 case Bytecodes::_fast_lgetfield: 2348 __ ld_long(Otos_i, Roffset, Otos_l); 2349 break; 2350 case Bytecodes::_fast_fgetfield: 2351 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f); 2352 break; 2353 case Bytecodes::_fast_dgetfield: 2354 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d); 2355 break; 2356 case Bytecodes::_fast_agetfield: 2357 __ load_heap_oop(Otos_i, Roffset, Otos_i); 2358 break; 2359 default: 2360 ShouldNotReachHere(); 2361 } 2362 2363 if (__ membar_has_effect(membar_bits)) { 2364 __ btst(Lscratch, Rflags); 2365 __ br(Assembler::zero, false, Assembler::pt, exit); 2366 __ delayed()->nop(); 2367 volatile_barrier(membar_bits); 2368 __ bind(exit); 2369 } 2370 2371 if (state == atos) { 2372 __ verify_oop(Otos_i); // does not blow flags! 2373 } 2374 } 2375 2376 void TemplateTable::jvmti_post_fast_field_mod() { 2377 if (JvmtiExport::can_post_field_modification()) { 2378 // Check to see if a field modification watch has been set before we take 2379 // the time to call into the VM. 2380 Label done; 2381 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr()); 2382 __ load_contents(get_field_modification_count_addr, G4_scratch); 2383 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done); 2384 __ pop_ptr(G4_scratch); // copy the object pointer from tos 2385 __ verify_oop(G4_scratch); 2386 __ push_ptr(G4_scratch); // put the object pointer back on tos 2387 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1); 2388 // Save tos values before call_VM() clobbers them. Since we have 2389 // to do it for every data type, we use the saved values as the 2390 // jvalue object. 
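// Roughly (a sketch): after the push below, the expression stack for a
// one-word int field looks like
//   ... | obj | value        <- top; Lesp points one word below
// and G3_scratch = Lesp + wordSize is then passed as the jvalue pointer,
// so the VM reads the saved tos value in place and no separate jvalue
// buffer is needed.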
2391 switch (bytecode()) { // save tos values before call_VM() clobbers them 2392 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break; 2393 case Bytecodes::_fast_bputfield: // fall through 2394 case Bytecodes::_fast_zputfield: // fall through 2395 case Bytecodes::_fast_sputfield: // fall through 2396 case Bytecodes::_fast_cputfield: // fall through 2397 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break; 2398 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break; 2399 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break; 2400 // get words in right order for use as jvalue object 2401 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break; 2402 } 2403 // setup pointer to jvalue object 2404 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize); 2405 // G4_scratch: object pointer 2406 // G1_scratch: cache entry pointer 2407 // G3_scratch: jvalue object on the stack 2408 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch); 2409 switch (bytecode()) { // restore tos values 2410 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break; 2411 case Bytecodes::_fast_bputfield: // fall through 2412 case Bytecodes::_fast_zputfield: // fall through 2413 case Bytecodes::_fast_sputfield: // fall through 2414 case Bytecodes::_fast_cputfield: // fall through 2415 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break; 2416 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break; 2417 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break; 2418 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break; 2419 } 2420 __ bind(done); 2421 } 2422 } 2423 2424 // The registers Rcache and index are expected to be set before the call. 2425 // The function may destroy various registers, just not the Rcache and index registers. 2426 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) { 2427 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2428 2429 if (JvmtiExport::can_post_field_modification()) { 2430 // Check to see if a field modification watch has been set before we take 2431 // the time to call into the VM. 2432 Label Label1; 2433 assert_different_registers(Rcache, index, G1_scratch); 2434 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr()); 2435 __ load_contents(get_field_modification_count_addr, G1_scratch); 2436 __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1); 2437 2438 // The Rcache and index registers have already been set. 2439 // This call could therefore be eliminated, but then the Rcache and index 2440 // registers would have to be used consistently from here on. 2441 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1); 2442 2443 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch); 2444 if (is_static) { 2445 // Life is simple. Null out the object pointer. 2446 __ clr(G4_scratch); 2447 } else { 2448 Register Rflags = G1_scratch; 2449 // Life is harder. The stack holds the value on top, followed by the 2450 // object. We don't know the size of the value, though; it could be 2451 // one or two words depending on its type. As a result, we must find 2452 // the type to determine where the object is.
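// Expression stack at this point (a sketch; the stack grows down and Lesp
// points one word below the top):
//   one-word value:  ... | obj | value          -> obj at Lesp + expr_offset(1)
//   two-word value:  ... | obj | word1 | word0  -> obj at Lesp + expr_offset(2)
// The ltos/dtos test below merely selects between these two offsets.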
2453 2454 Label two_word, valsizeknown; 2455 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); 2456 __ mov(Lesp, G4_scratch); 2457 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2458 // Make sure we don't need to mask Rflags after the above shift 2459 ConstantPoolCacheEntry::verify_tos_state_shift(); 2460 __ cmp(Rflags, ltos); 2461 __ br(Assembler::equal, false, Assembler::pt, two_word); 2462 __ delayed()->cmp(Rflags, dtos); 2463 __ br(Assembler::equal, false, Assembler::pt, two_word); 2464 __ delayed()->nop(); 2465 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1)); 2466 __ ba_short(valsizeknown); 2467 __ bind(two_word); 2468 2469 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2)); 2470 2471 __ bind(valsizeknown); 2472 // setup object pointer 2473 __ ld_ptr(G4_scratch, 0, G4_scratch); 2474 __ verify_oop(G4_scratch); 2475 } 2476 // setup pointer to jvalue object 2477 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize); 2478 // G4_scratch: object pointer or NULL if static 2479 // G3_scratch: cache entry pointer 2480 // G1_scratch: jvalue object on the stack 2481 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), 2482 G4_scratch, G3_scratch, G1_scratch); 2483 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2484 __ bind(Label1); 2485 } 2486 } 2487 2488 void TemplateTable::pop_and_check_object(Register r) { 2489 __ pop_ptr(r); 2490 __ null_check(r); // for field access must check obj. 2491 __ verify_oop(r); 2492 } 2493 2494 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 2495 transition(vtos, vtos); 2496 Register Rcache = G3_scratch; 2497 Register index = G4_scratch; 2498 Register Rclass = Rcache; 2499 Register Roffset= G4_scratch; 2500 Register Rflags = G1_scratch; 2501 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2502 2503 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2)); 2504 jvmti_post_field_mod(Rcache, index, is_static); 2505 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); 2506 2507 Assembler::Membar_mask_bits read_bits = 2508 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); 2509 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; 2510 2511 Label notVolatile, checkVolatile, exit; 2512 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { 2513 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2514 __ and3(Rflags, Lscratch, Lscratch); 2515 2516 if (__ membar_has_effect(read_bits)) { 2517 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); 2518 volatile_barrier(read_bits); 2519 __ bind(notVolatile); 2520 } 2521 } 2522 2523 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2524 // Make sure we don't need to mask Rflags after the above shift 2525 ConstantPoolCacheEntry::verify_tos_state_shift(); 2526 2527 // compute field type 2528 Label notInt, notShort, notChar, notObj, notByte, notBool, notLong, notFloat; 2529 2530 if (is_static) { 2531 // putstatic with object type most likely, check that first 2532 __ cmp(Rflags, atos); 2533 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2534 __ delayed()->cmp(Rflags, itos); 2535 2536 // atos 2537 { 2538 __ pop_ptr(); 2539 __ verify_oop(Otos_i); 2540 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2541 __ ba(checkVolatile); 2542 __ delayed()->tst(Lscratch); 2543 } 2544 2545 __ bind(notObj); 
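// (A note on the pattern used throughout this type-dispatch chain: each
// conditional branch carries delayed()->cmp(Rflags, <next type>) in its
// delay slot, so whenever a test falls through, the comparison for the
// next test has already been issued; the commented-out cmp lines record
// which comparison is pending at each label.)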
2546 // cmp(Rflags, itos); 2547 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2548 __ delayed()->cmp(Rflags, btos); 2549 2550 // itos 2551 { 2552 __ pop_i(); 2553 __ st(Otos_i, Rclass, Roffset); 2554 __ ba(checkVolatile); 2555 __ delayed()->tst(Lscratch); 2556 } 2557 2558 __ bind(notInt); 2559 } else { 2560 // putfield with int type most likely, check that first 2561 __ cmp(Rflags, itos); 2562 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2563 __ delayed()->cmp(Rflags, atos); 2564 2565 // itos 2566 { 2567 __ pop_i(); 2568 pop_and_check_object(Rclass); 2569 __ st(Otos_i, Rclass, Roffset); 2570 if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no); 2571 __ ba(checkVolatile); 2572 __ delayed()->tst(Lscratch); 2573 } 2574 2575 __ bind(notInt); 2576 // cmp(Rflags, atos); 2577 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2578 __ delayed()->cmp(Rflags, btos); 2579 2580 // atos 2581 { 2582 __ pop_ptr(); 2583 pop_and_check_object(Rclass); 2584 __ verify_oop(Otos_i); 2585 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2586 if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no); 2587 __ ba(checkVolatile); 2588 __ delayed()->tst(Lscratch); 2589 } 2590 2591 __ bind(notObj); 2592 } 2593 2594 // cmp(Rflags, btos); 2595 __ br(Assembler::notEqual, false, Assembler::pt, notByte); 2596 __ delayed()->cmp(Rflags, ztos); 2597 2598 // btos 2599 { 2600 __ pop_i(); 2601 if (!is_static) pop_and_check_object(Rclass); 2602 __ stb(Otos_i, Rclass, Roffset); 2603 if (!is_static && rc == may_rewrite) { 2604 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no); 2605 } 2606 __ ba(checkVolatile); 2607 __ delayed()->tst(Lscratch); 2608 } 2609 2610 __ bind(notByte); 2611 2612 // cmp(Rflags, ztos); 2613 __ br(Assembler::notEqual, false, Assembler::pt, notBool); 2614 __ delayed()->cmp(Rflags, ltos); 2615 2616 // ztos 2617 { 2618 __ pop_i(); 2619 if (!is_static) pop_and_check_object(Rclass); 2620 __ and3(Otos_i, 1, Otos_i); 2621 __ stb(Otos_i, Rclass, Roffset); 2622 if (!is_static && rc == may_rewrite) { 2623 patch_bytecode(Bytecodes::_fast_zputfield, G3_scratch, G4_scratch, true, byte_no); 2624 } 2625 __ ba(checkVolatile); 2626 __ delayed()->tst(Lscratch); 2627 } 2628 2629 __ bind(notBool); 2630 // cmp(Rflags, ltos); 2631 __ br(Assembler::notEqual, false, Assembler::pt, notLong); 2632 __ delayed()->cmp(Rflags, ctos); 2633 2634 // ltos 2635 { 2636 __ pop_l(); 2637 if (!is_static) pop_and_check_object(Rclass); 2638 __ st_long(Otos_l, Rclass, Roffset); 2639 if (!is_static && rc == may_rewrite) { 2640 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no); 2641 } 2642 __ ba(checkVolatile); 2643 __ delayed()->tst(Lscratch); 2644 } 2645 2646 __ bind(notLong); 2647 // cmp(Rflags, ctos); 2648 __ br(Assembler::notEqual, false, Assembler::pt, notChar); 2649 __ delayed()->cmp(Rflags, stos); 2650 2651 // ctos (char) 2652 { 2653 __ pop_i(); 2654 if (!is_static) pop_and_check_object(Rclass); 2655 __ sth(Otos_i, Rclass, Roffset); 2656 if (!is_static && rc == may_rewrite) { 2657 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no); 2658 } 2659 __ ba(checkVolatile); 2660 __ delayed()->tst(Lscratch); 2661 } 2662 2663 __ bind(notChar); 2664 // cmp(Rflags, stos); 2665 __ br(Assembler::notEqual, false, Assembler::pt, notShort); 2666 __ delayed()->cmp(Rflags, ftos); 2667 2668 // stos (short) 2669
{ 2670 __ pop_i(); 2671 if (!is_static) pop_and_check_object(Rclass); 2672 __ sth(Otos_i, Rclass, Roffset); 2673 if (!is_static && rc == may_rewrite) { 2674 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no); 2675 } 2676 __ ba(checkVolatile); 2677 __ delayed()->tst(Lscratch); 2678 } 2679 2680 __ bind(notShort); 2681 // cmp(Rflags, ftos); 2682 __ br(Assembler::notZero, false, Assembler::pt, notFloat); 2683 __ delayed()->nop(); 2684 2685 // ftos 2686 { 2687 __ pop_f(); 2688 if (!is_static) pop_and_check_object(Rclass); 2689 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); 2690 if (!is_static && rc == may_rewrite) { 2691 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no); 2692 } 2693 __ ba(checkVolatile); 2694 __ delayed()->tst(Lscratch); 2695 } 2696 2697 __ bind(notFloat); 2698 2699 // dtos 2700 { 2701 __ pop_d(); 2702 if (!is_static) pop_and_check_object(Rclass); 2703 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); 2704 if (!is_static && rc == may_rewrite) { 2705 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no); 2706 } 2707 } 2708 2709 __ bind(checkVolatile); 2710 __ tst(Lscratch); 2711 2712 if (__ membar_has_effect(write_bits)) { 2713 // __ tst(Lscratch); in delay slot 2714 __ br(Assembler::zero, false, Assembler::pt, exit); 2715 __ delayed()->nop(); 2716 volatile_barrier(Assembler::StoreLoad); 2717 __ bind(exit); 2718 } 2719 } 2720 2721 void TemplateTable::fast_storefield(TosState state) { 2722 transition(state, vtos); 2723 Register Rcache = G3_scratch; 2724 Register Rclass = Rcache; 2725 Register Roffset= G4_scratch; 2726 Register Rflags = G1_scratch; 2727 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2728 2729 jvmti_post_fast_field_mod(); 2730 2731 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1); 2732 2733 Assembler::Membar_mask_bits read_bits = 2734 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); 2735 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; 2736 2737 Label notVolatile, checkVolatile, exit; 2738 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { 2739 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); 2740 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2741 __ and3(Rflags, Lscratch, Lscratch); 2742 if (__ membar_has_effect(read_bits)) { 2743 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); 2744 volatile_barrier(read_bits); 2745 __ bind(notVolatile); 2746 } 2747 } 2748 2749 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2750 pop_and_check_object(Rclass); 2751 2752 switch (bytecode()) { 2753 case Bytecodes::_fast_zputfield: __ and3(Otos_i, 1, Otos_i); // fall through to bputfield 2754 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break; 2755 case Bytecodes::_fast_cputfield: /* fall through */ 2756 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break; 2757 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break; 2758 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break; 2759 case Bytecodes::_fast_fputfield: 2760 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); 2761 break; 2762 case Bytecodes::_fast_dputfield: 2763 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); 2764 break; 2765 case Bytecodes::_fast_aputfield: 2766 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), 
false); 2767 break; 2768 default: 2769 ShouldNotReachHere(); 2770 } 2771 2772 if (__ membar_has_effect(write_bits)) { 2773 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit); 2774 volatile_barrier(Assembler::StoreLoad); 2775 __ bind(exit); 2776 } 2777 } 2778 2779 void TemplateTable::putfield(int byte_no) { 2780 putfield_or_static(byte_no, false); 2781 } 2782 2783 void TemplateTable::nofast_putfield(int byte_no) { 2784 putfield_or_static(byte_no, false, may_not_rewrite); 2785 } 2786 2787 void TemplateTable::putstatic(int byte_no) { 2788 putfield_or_static(byte_no, true); 2789 } 2790 2791 void TemplateTable::fast_xaccess(TosState state) { 2792 transition(vtos, state); 2793 Register Rcache = G3_scratch; 2794 Register Roffset = G4_scratch; 2795 Register Rflags = G4_scratch; 2796 Register Rreceiver = Lscratch; 2797 2798 __ ld_ptr(Llocals, 0, Rreceiver); 2799 2800 // access constant pool cache (is resolved) 2801 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2); 2802 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset); 2803 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp 2804 2805 __ verify_oop(Rreceiver); 2806 __ null_check(Rreceiver); 2807 if (state == atos) { 2808 __ load_heap_oop(Rreceiver, Roffset, Otos_i); 2809 } else if (state == itos) { 2810 __ ld (Rreceiver, Roffset, Otos_i) ; 2811 } else if (state == ftos) { 2812 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f); 2813 } else { 2814 ShouldNotReachHere(); 2815 } 2816 2817 Assembler::Membar_mask_bits membar_bits = 2818 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2819 if (__ membar_has_effect(membar_bits)) { 2820 2821 // Get is_volatile value in Rflags and check if membar is needed 2822 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags); 2823 2824 // Test volatile 2825 Label notVolatile; 2826 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2827 __ btst(Rflags, Lscratch); 2828 __ br(Assembler::zero, false, Assembler::pt, notVolatile); 2829 __ delayed()->nop(); 2830 volatile_barrier(membar_bits); 2831 __ bind(notVolatile); 2832 } 2833 2834 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__); 2835 __ sub(Lbcp, 1, Lbcp); 2836 } 2837 2838 //---------------------------------------------------------------------------------------------------- 2839 // Calls 2840 2841 void TemplateTable::count_calls(Register method, Register temp) { 2842 // implemented elsewhere 2843 ShouldNotReachHere(); 2844 } 2845 2846 void TemplateTable::prepare_invoke(int byte_no, 2847 Register method, // linked method (or i-klass) 2848 Register ra, // return address 2849 Register index, // itable index, MethodType, etc. 
2850 Register recv, // if caller wants to see it 2851 Register flags // if caller wants to test it 2852 ) { 2853 // determine flags 2854 const Bytecodes::Code code = bytecode(); 2855 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 2856 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 2857 const bool is_invokehandle = code == Bytecodes::_invokehandle; 2858 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 2859 const bool is_invokespecial = code == Bytecodes::_invokespecial; 2860 const bool load_receiver = (recv != noreg); 2861 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 2862 assert(recv == noreg || recv == O0, ""); 2863 assert(flags == noreg || flags == O1, ""); 2864 2865 // setup registers & access constant pool cache 2866 if (recv == noreg) recv = O0; 2867 if (flags == noreg) flags = O1; 2868 const Register temp = O2; 2869 assert_different_registers(method, ra, index, recv, flags, temp); 2870 2871 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); 2872 2873 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 2874 2875 // maybe push appendix to arguments 2876 if (is_invokedynamic || is_invokehandle) { 2877 Label L_no_push; 2878 __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp); 2879 __ btst(flags, temp); 2880 __ br(Assembler::zero, false, Assembler::pt, L_no_push); 2881 __ delayed()->nop(); 2882 // Push the appendix as a trailing parameter. 2883 // This must be done before we get the receiver, 2884 // since the parameter_size includes it. 2885 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0"); 2886 __ load_resolved_reference_at_index(temp, index); 2887 __ verify_oop(temp); 2888 __ push_ptr(temp); // push appendix (MethodType, CallSite, etc.) 
2889 __ bind(L_no_push); 2890 } 2891 2892 // load receiver if needed (after appendix is pushed so parameter size is correct) 2893 if (load_receiver) { 2894 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size 2895 __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp 2896 __ verify_oop(recv); 2897 } 2898 2899 // compute return type 2900 __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra); 2901 // Make sure we don't need to mask flags after the above shift 2902 ConstantPoolCacheEntry::verify_tos_state_shift(); 2903 // load return address 2904 { 2905 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); 2906 AddressLiteral table(table_addr); 2907 __ set(table, temp); 2908 __ sll(ra, LogBytesPerWord, ra); 2909 __ ld_ptr(Address(temp, ra), ra); 2910 } 2911 } 2912 2913 2914 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) { 2915 Register Rtemp = G4_scratch; 2916 Register Rcall = Rindex; 2917 assert_different_registers(Rcall, G5_method, Gargs, Rret); 2918 2919 // get target Method* & entry point 2920 __ lookup_virtual_method(Rrecv, Rindex, G5_method); 2921 __ profile_arguments_type(G5_method, Rcall, Gargs, true); 2922 __ profile_called_method(G5_method, Rtemp); 2923 __ call_from_interpreter(Rcall, Gargs, Rret); 2924 } 2925 2926 void TemplateTable::invokevirtual(int byte_no) { 2927 transition(vtos, vtos); 2928 assert(byte_no == f2_byte, "use this argument"); 2929 2930 Register Rscratch = G3_scratch; 2931 Register Rtemp = G4_scratch; 2932 Register Rret = Lscratch; 2933 Register O0_recv = O0; 2934 Label notFinal; 2935 2936 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false); 2937 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 2938 2939 // Check for vfinal 2940 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch); 2941 __ btst(Rret, G4_scratch); 2942 __ br(Assembler::zero, false, Assembler::pt, notFinal); 2943 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters 2944 2945 if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) { 2946 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp); 2947 } 2948 2949 invokevfinal_helper(Rscratch, Rret); 2950 2951 __ bind(notFinal); 2952 2953 __ mov(G5_method, Rscratch); // better scratch register 2954 __ load_receiver(G4_scratch, O0_recv); // gets receiverOop 2955 // receiver is in O0_recv 2956 __ verify_oop(O0_recv); 2957 2958 // get return address 2959 AddressLiteral table(Interpreter::invoke_return_entry_table()); 2960 __ set(table, Rtemp); 2961 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type 2962 // Make sure we don't need to mask Rret after the above shift 2963 ConstantPoolCacheEntry::verify_tos_state_shift(); 2964 __ sll(Rret, LogBytesPerWord, Rret); 2965 __ ld_ptr(Rtemp, Rret, Rret); // get return address 2966 2967 // get receiver klass 2968 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); 2969 __ load_klass(O0_recv, O0_recv); 2970 __ verify_klass_ptr(O0_recv); 2971 2972 __ profile_virtual_call(O0_recv, O4); 2973 2974 generate_vtable_call(O0_recv, Rscratch, Rret); 2975 } 2976 2977 void TemplateTable::fast_invokevfinal(int byte_no) { 2978 transition(vtos, vtos); 2979 assert(byte_no == f2_byte, "use this argument"); 2980 2981 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true, 2982 /*is_invokevfinal*/true, false); 2983 __ mov(SP, O5_savedSP); // record SP 
that we wanted the callee to restore 2984 invokevfinal_helper(G3_scratch, Lscratch); 2985 } 2986 2987 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) { 2988 Register Rtemp = G4_scratch; 2989 2990 // Load receiver from stack slot 2991 __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch); 2992 __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch); 2993 __ load_receiver(G4_scratch, O0); 2994 2995 // receiver NULL check 2996 __ null_check(O0); 2997 2998 __ profile_final_call(O4); 2999 __ profile_arguments_type(G5_method, Rscratch, Gargs, true); 3000 3001 // get return address 3002 AddressLiteral table(Interpreter::invoke_return_entry_table()); 3003 __ set(table, Rtemp); 3004 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type 3005 // Make sure we don't need to mask Rret after the above shift 3006 ConstantPoolCacheEntry::verify_tos_state_shift(); 3007 __ sll(Rret, LogBytesPerWord, Rret); 3008 __ ld_ptr(Rtemp, Rret, Rret); // get return address 3009 3010 3011 // do the call 3012 __ call_from_interpreter(Rscratch, Gargs, Rret); 3013 } 3014 3015 3016 void TemplateTable::invokespecial(int byte_no) { 3017 transition(vtos, vtos); 3018 assert(byte_no == f1_byte, "use this argument"); 3019 3020 const Register Rret = Lscratch; 3021 const Register O0_recv = O0; 3022 const Register Rscratch = G3_scratch; 3023 3024 prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check 3025 __ null_check(O0_recv); 3026 3027 // do the call 3028 __ profile_call(O4); 3029 __ profile_arguments_type(G5_method, Rscratch, Gargs, false); 3030 __ call_from_interpreter(Rscratch, Gargs, Rret); 3031 } 3032 3033 3034 void TemplateTable::invokestatic(int byte_no) { 3035 transition(vtos, vtos); 3036 assert(byte_no == f1_byte, "use this argument"); 3037 3038 const Register Rret = Lscratch; 3039 const Register Rscratch = G3_scratch; 3040 3041 prepare_invoke(byte_no, G5_method, Rret); // get f1 Method* 3042 3043 // do the call 3044 __ profile_call(O4); 3045 __ profile_arguments_type(G5_method, Rscratch, Gargs, false); 3046 __ call_from_interpreter(Rscratch, Gargs, Rret); 3047 } 3048 3049 void TemplateTable::invokeinterface_object_method(Register RKlass, 3050 Register Rcall, 3051 Register Rret, 3052 Register Rflags) { 3053 Register Rscratch = G4_scratch; 3054 Register Rindex = Lscratch; 3055 3056 assert_different_registers(Rscratch, Rindex, Rret); 3057 3058 Label notFinal; 3059 3060 // Check for vfinal 3061 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch); 3062 __ btst(Rflags, Rscratch); 3063 __ br(Assembler::zero, false, Assembler::pt, notFinal); 3064 __ delayed()->nop(); 3065 3066 __ profile_final_call(O4); 3067 3068 // do the call - the index (f2) contains the Method* 3069 assert_different_registers(G5_method, Gargs, Rcall); 3070 __ mov(Rindex, G5_method); 3071 __ profile_arguments_type(G5_method, Rcall, Gargs, true); 3072 __ call_from_interpreter(Rcall, Gargs, Rret); 3073 __ bind(notFinal); 3074 3075 __ profile_virtual_call(RKlass, O4); 3076 generate_vtable_call(RKlass, Rindex, Rret); 3077 } 3078 3079 3080 void TemplateTable::invokeinterface(int byte_no) { 3081 transition(vtos, vtos); 3082 assert(byte_no == f1_byte, "use this argument"); 3083 3084 const Register Rinterface = G1_scratch; 3085 const Register Rmethod = Lscratch; 3086 const Register Rret = G3_scratch; 3087 const Register O0_recv = O0; 3088 const Register O1_flags = O1; 3089 const Register O2_Klass = O2; 3090 const Register 
Rscratch = G4_scratch; 3091 assert_different_registers(Rscratch, G5_method); 3092 3093 prepare_invoke(byte_no, Rinterface, Rret, Rmethod, O0_recv, O1_flags); 3094 3095 // get receiver klass 3096 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); 3097 __ load_klass(O0_recv, O2_Klass); 3098 3099 // Special case of invokeinterface called for virtual method of 3100 // java.lang.Object. See cpCacheOop.cpp for details. 3101 // This code isn't produced by javac, but could be produced by 3102 // another compliant java compiler. 3103 Label notMethod; 3104 __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch); 3105 __ btst(O1_flags, Rscratch); 3106 __ br(Assembler::zero, false, Assembler::pt, notMethod); 3107 __ delayed()->nop(); 3108 3109 invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags); 3110 3111 __ bind(notMethod); 3112 3113 Register Rtemp = O1_flags; 3114 3115 Label L_no_such_interface; 3116 3117 // Receiver subtype check against REFC. 3118 __ lookup_interface_method(// inputs: rec. class, interface, itable index 3119 O2_Klass, Rinterface, noreg, 3120 // outputs: temp reg1, temp reg2, temp reg3 3121 G5_method, Rscratch, Rtemp, 3122 L_no_such_interface, 3123 /*return_method=*/false); 3124 3125 __ profile_virtual_call(O2_Klass, O4); 3126 3127 // 3128 // find entry point to call 3129 // 3130 3131 // Get declaring interface class from method 3132 __ ld_ptr(Rmethod, Method::const_offset(), Rinterface); 3133 __ ld_ptr(Rinterface, ConstMethod::constants_offset(), Rinterface); 3134 __ ld_ptr(Rinterface, ConstantPool::pool_holder_offset_in_bytes(), Rinterface); 3135 3136 // Get itable index from method 3137 const Register Rindex = G5_method; 3138 __ ld(Rmethod, Method::itable_index_offset(), Rindex); 3139 __ sub(Rindex, Method::itable_index_max, Rindex); 3140 __ neg(Rindex); 3141 3142 // Preserve O2_Klass for throw_AbstractMethodErrorVerbose 3143 __ mov(O2_Klass, O4); 3144 __ lookup_interface_method(// inputs: rec. class, interface, itable index 3145 O4, Rinterface, Rindex, 3146 // outputs: method, scan temp reg, temp reg 3147 G5_method, Rscratch, Rtemp, 3148 L_no_such_interface); 3149 3150 // Check for abstract method error. 3151 { 3152 Label ok; 3153 __ br_notnull_short(G5_method, Assembler::pt, ok); 3154 // Pass arguments for generating a verbose error message. 3155 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), 3156 O2_Klass, Rmethod); 3157 __ should_not_reach_here(); 3158 __ bind(ok); 3159 } 3160 3161 Register Rcall = Rinterface; 3162 assert_different_registers(Rcall, G5_method, Gargs, Rret); 3163 3164 __ profile_arguments_type(G5_method, Rcall, Gargs, true); 3165 __ profile_called_method(G5_method, Rscratch); 3166 __ call_from_interpreter(Rcall, Gargs, Rret); 3167 3168 __ bind(L_no_such_interface); 3169 // Pass arguments for generating a verbose error message. 
3170 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), 3171 O2_Klass, Rinterface); 3172 __ should_not_reach_here(); 3173 } 3174 3175 void TemplateTable::invokehandle(int byte_no) { 3176 transition(vtos, vtos); 3177 assert(byte_no == f1_byte, "use this argument"); 3178 3179 const Register Rret = Lscratch; 3180 const Register G4_mtype = G4_scratch; 3181 const Register O0_recv = O0; 3182 const Register Rscratch = G3_scratch; 3183 3184 prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv); 3185 __ null_check(O0_recv); 3186 3187 // G4: MethodType object (from cpool->resolved_references[f1], if necessary) 3188 // G5: MH.invokeExact_MT method (from f2) 3189 3190 // Note: G4_mtype is already pushed (if necessary) by prepare_invoke 3191 3192 // do the call 3193 __ verify_oop(G4_mtype); 3194 __ profile_final_call(O4); // FIXME: profile the LambdaForm also 3195 __ profile_arguments_type(G5_method, Rscratch, Gargs, true); 3196 __ call_from_interpreter(Rscratch, Gargs, Rret); 3197 } 3198 3199 3200 void TemplateTable::invokedynamic(int byte_no) { 3201 transition(vtos, vtos); 3202 assert(byte_no == f1_byte, "use this argument"); 3203 3204 const Register Rret = Lscratch; 3205 const Register G4_callsite = G4_scratch; 3206 const Register Rscratch = G3_scratch; 3207 3208 prepare_invoke(byte_no, G5_method, Rret, G4_callsite); 3209 3210 // G4: CallSite object (from cpool->resolved_references[f1]) 3211 // G5: MH.linkToCallSite method (from f2) 3212 3213 // Note: G4_callsite is already pushed by prepare_invoke 3214 3215 // %%% should make a type profile for any invokedynamic that takes a ref argument 3216 // profile this call 3217 __ profile_call(O4); 3218 3219 // do the call 3220 __ verify_oop(G4_callsite); 3221 __ profile_arguments_type(G5_method, Rscratch, Gargs, false); 3222 __ call_from_interpreter(Rscratch, Gargs, Rret); 3223 } 3224 3225 3226 //---------------------------------------------------------------------------------------------------- 3227 // Allocation 3228 3229 void TemplateTable::_new() { 3230 transition(vtos, atos); 3231 3232 Label slow_case; 3233 Label done; 3234 Label initialize_header; 3235 Label initialize_object; // including clearing the fields 3236 3237 Register RallocatedObject = Otos_i; 3238 Register RinstanceKlass = O1; 3239 Register Roffset = O3; 3240 Register Rscratch = O4; 3241 3242 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned); 3243 __ get_cpool_and_tags(Rscratch, G3_scratch); 3244 // make sure the class we're about to instantiate has been resolved 3245 // This is done before loading InstanceKlass to be consistent with the order 3246 // how Constant Pool is updated (see ConstantPool::klass_at_put) 3247 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch); 3248 __ ldub(G3_scratch, Roffset, G3_scratch); 3249 __ cmp(G3_scratch, JVM_CONSTANT_Class); 3250 __ br(Assembler::notEqual, false, Assembler::pn, slow_case); 3251 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset); 3252 // get InstanceKlass 3253 __ load_resolved_klass_at_offset(Rscratch, Roffset, RinstanceKlass); 3254 3255 // make sure klass is fully initialized: 3256 __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch); 3257 __ cmp(G3_scratch, InstanceKlass::fully_initialized); 3258 __ br(Assembler::notEqual, false, Assembler::pn, slow_case); 3259 __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset); 3260 3261 // get instance_size in InstanceKlass 
(already aligned) 3262 //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset); 3263 3264 // make sure klass has no finalizer, is not abstract, is not an interface, and is not java/lang/Class 3265 __ btst(Klass::_lh_instance_slow_path_bit, Roffset); 3266 __ br(Assembler::notZero, false, Assembler::pn, slow_case); 3267 __ delayed()->nop(); 3268 3269 // Allocate the instance: 3270 // If TLAB is enabled: 3271 // Try to allocate in the TLAB. 3272 // If fails, go to the slow path. 3273 // Else If inline contiguous allocations are enabled: 3274 // Try to allocate in eden. 3275 // If fails due to heap end, go to slow path. 3276 // 3277 // If TLAB is enabled OR inline contiguous is enabled: 3278 // Initialize the allocation. 3279 // Exit. 3280 // 3281 // Go to slow path. 3282 3283 const bool allow_shared_alloc = 3284 Universe::heap()->supports_inline_contig_alloc(); 3285 3286 if (UseTLAB) { 3287 Register RoldTopValue = RallocatedObject; 3288 Register RtlabWasteLimitValue = G3_scratch; 3289 Register RnewTopValue = G1_scratch; 3290 Register RendValue = Rscratch; 3291 Register RfreeValue = RnewTopValue; 3292 3293 // check if we can allocate in the TLAB 3294 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject 3295 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue); 3296 __ add(RoldTopValue, Roffset, RnewTopValue); 3297 3298 // if there is enough space, we do not CAS and do not clear 3299 __ cmp(RnewTopValue, RendValue); 3300 if (ZeroTLAB) { 3301 // the fields have already been cleared 3302 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header); 3303 } else { 3304 // initialize both the header and fields 3305 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object); 3306 } 3307 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 3308 3309 // Allocation does not fit in the TLAB. 3310 __ ba_short(slow_case); 3311 } else { 3312 // Allocation in the shared Eden 3313 if (allow_shared_alloc) { 3314 Register RoldTopValue = G1_scratch; 3315 Register RtopAddr = G3_scratch; 3316 Register RnewTopValue = RallocatedObject; 3317 Register RendValue = Rscratch; 3318 3319 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr); 3320 3321 Label retry; 3322 __ bind(retry); 3323 __ set((intptr_t)Universe::heap()->end_addr(), RendValue); 3324 __ ld_ptr(RendValue, 0, RendValue); 3325 __ ld_ptr(RtopAddr, 0, RoldTopValue); 3326 __ add(RoldTopValue, Roffset, RnewTopValue); 3327 3328 // RnewTopValue contains the top address after the new object 3329 // has been allocated. 3330 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case); 3331 3332 __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue); 3333 3334 // if someone beat us on the allocation, try again, otherwise continue 3335 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry); 3336 3337 // bump total bytes allocated by this thread 3338 // RoldTopValue and RtopAddr are dead, so can use G1 and G3 3339 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch); 3340 } 3341 } 3342 3343 // If UseTLAB or allow_shared_alloc is true, the object was allocated above 3344 // and still needs to be initialized. Otherwise, skip to the slow path.
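// When BIS zeroing is not used, the field-clearing step below behaves
// roughly like this sketch (names are illustrative, not HotSpot variables):
//
//   intptr_t off = instance_size - sizeof(oopDesc);  // payload bytes
//   if (off != 0) {                                  // header-only objects skip ahead
//     char* base = (char*)obj + sizeof(oopDesc);
//     do {
//       off -= wordSize;
//       *(intptr_t*)(base + off) = 0;                // zero one word, top down
//     } while (off != 0);
//   }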
3345 if (UseTLAB || allow_shared_alloc) { 3346 // clear object fields 3347 __ bind(initialize_object); 3348 __ deccc(Roffset, sizeof(oopDesc)); 3349 __ br(Assembler::zero, false, Assembler::pt, initialize_header); 3350 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch); 3351 3352 // initialize remaining object fields 3353 if (UseBlockZeroing) { 3354 // Use BIS for zeroing 3355 __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header); 3356 } else { 3357 Label loop; 3358 __ subcc(Roffset, wordSize, Roffset); 3359 __ bind(loop); 3360 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot 3361 __ st_ptr(G0, G3_scratch, Roffset); 3362 __ br(Assembler::notEqual, false, Assembler::pt, loop); 3363 __ delayed()->subcc(Roffset, wordSize, Roffset); 3364 } 3365 __ ba_short(initialize_header); 3366 } 3367 3368 // slow case 3369 __ bind(slow_case); 3370 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned); 3371 __ get_constant_pool(O1); 3372 3373 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2); 3374 3375 __ ba_short(done); 3376 3377 // Initialize the header: mark, klass 3378 __ bind(initialize_header); 3379 3380 if (UseBiasedLocking) { 3381 __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch); 3382 } else { 3383 __ set((intptr_t)markOopDesc::prototype(), G4_scratch); 3384 } 3385 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark 3386 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed 3387 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms) 3388 3389 { 3390 SkipIfEqual skip_if( 3391 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero); 3392 // Trigger dtrace event 3393 __ push(atos); 3394 __ call_VM_leaf(noreg, 3395 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0); 3396 __ pop(atos); 3397 } 3398 3399 // continue 3400 __ bind(done); 3401 } 3402 3403 3404 3405 void TemplateTable::newarray() { 3406 transition(itos, atos); 3407 __ ldub(Lbcp, 1, O1); 3408 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i); 3409 } 3410 3411 3412 void TemplateTable::anewarray() { 3413 transition(itos, atos); 3414 __ get_constant_pool(O1); 3415 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned); 3416 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i); 3417 } 3418 3419 3420 void TemplateTable::arraylength() { 3421 transition(atos, itos); 3422 Label ok; 3423 __ verify_oop(Otos_i); 3424 __ tst(Otos_i); 3425 __ throw_if_not_1_x( Assembler::notZero, ok ); 3426 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i); 3427 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok); 3428 } 3429 3430 3431 void TemplateTable::checkcast() { 3432 transition(atos, atos); 3433 Label done, is_null, quicked, cast_ok, resolved; 3434 Register Roffset = G1_scratch; 3435 Register RobjKlass = O5; 3436 Register RspecifiedKlass = O4; 3437 3438 // Check for casting a NULL 3439 __ br_null(Otos_i, false, Assembler::pn, is_null); 3440 __ delayed()->nop(); 3441 3442 // Get value klass in RobjKlass 3443 __ load_klass(Otos_i, RobjKlass); // get value klass 3444 3445 // Get constant pool tag 3446 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned); 3447 3448 // See if the checkcast has been quickened 3449 __ get_cpool_and_tags(Lscratch, G3_scratch); 
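// (A sketch of the check that follows: tags[index] == JVM_CONSTANT_Class
// means the constant pool entry has already been resolved ("quickened"),
// so the target Klass* can be loaded directly from the constant pool;
// otherwise we call into the runtime (quicken_io_cc) to resolve it first.)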
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);

  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to cast_ok if no
  // failure. Throw exception if failure.
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok);

  // Not a subtype; so must throw exception
  __ throw_if_not_x(Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch);

  __ bind(cast_ok);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}


void TemplateTable::instanceof() {
  Label done, is_null, quicked, resolved;
  transition(atos, itos);
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for testing a NULL
  __ br_null(Otos_i, false, Assembler::pt, is_null);
  __ delayed()->nop();

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the instanceof has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ get_constant_pool(Lscratch);
  __ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);

  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to done if no
  // failure. Return 0 if failure.
  __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done);
  // Not a subtype; return 0;
  __ clr(Otos_i);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}
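
// In Java terms (a sketch of the semantics, not of the generated code), the
// two templates above implement:
//
//   checkcast:   if (obj != null && !(obj instanceof T)) throw ClassCastException;
//   instanceof:  push(obj != null && obj instanceof T ? 1 : 0);
//
// which is why checkcast leaves the tos state at atos while instanceof
// transitions atos -> itos.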
void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping;
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);
  // get the unpatched byte code
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
  __ mov(O0, Lbyte_code);

  // post the breakpoint event
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);

  // complete the execution of original bytecode
  __ dispatch_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // This works because the exception is cached in Otos_i, which is the same
  // as O0, which is the register that throw_exception_entry expects.
  assert(Otos_i == Oexception, "see explanation above");

  __ verify_oop(Otos_i);
  __ null_check(Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
}


//----------------------------------------------------------------------------------------------------
// Synchronization


// See frame_sparc.hpp for monitor block layout.
// Monitor elements are dynamically allocated by growing the stack as needed.
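
// A rough sketch (pseudocode, not generated code) of the monitor-block scan
// that monitorenter performs below: walk from the most recent monitor up to
// the top-most one, remembering a free slot and stopping early if the same
// object is already locked (recursive locking):
//
//   free = NULL;
//   for (m = Lmonitors; m <= top_most_monitor; m = next(m)) {
//     if (m->obj == NULL) free = m;   // remember an unused slot
//     if (m->obj == obj) break;       // same object: stop the search
//   }
//   if (free == NULL) free = add_monitor_to_stack();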
void TemplateTable::monitorenter() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  // Try to acquire a lock on the object.
  // Repeat until we succeed (i.e., until
  // monitorenter returns true).

  { Label ok;
    __ tst(Otos_i);
    __ throw_if_not_1_x(Assembler::notZero, ok);
    __ delayed()->mov(Otos_i, Lscratch); // save obj
    __ throw_if_not_2(Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  }

  assert(O0 == Otos_i, "Be sure where the object to lock is");

  // find a free slot in the monitor block

  // initialize entry pointer
  __ clr(O1); // points to free slot or NULL

  {
    Label entry, loop, exit;
    __ add(__ top_most_monitor(), O2); // last one to check
    __ ba(entry);
    __ delayed()->mov(Lmonitors, O3);  // first one to check

    __ bind(loop);

    __ verify_oop(O4); // verify each monitor's oop
    __ tst(O4);        // is this entry unused?
    __ movcc(Assembler::zero, false, Assembler::ptr_cc, O3, O1);

    __ cmp(O4, O0); // check if current entry is for same object
    __ brx(Assembler::equal, false, Assembler::pn, exit);
    __ delayed()->inc(O3, frame::interpreter_frame_monitor_size() * wordSize); // check next one

    __ bind(entry);

    __ cmp(O3, O2);
    __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, loop);
    __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);

    __ bind(exit);
  }

  { Label allocated;

    // found free slot?
    __ br_notnull_short(O1, Assembler::pn, allocated);

    __ add_monitor_to_stack(false, O2, O3);
    __ mov(Lmonitors, O1);

    __ bind(allocated);
  }

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ inc(Lbcp);

  __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
  __ lock_object(O1, O0);

  // check if there's enough space on the stack for the monitors after locking
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_x(Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch);

  assert(O0 == Otos_i, "just checking");

  { Label entry, loop, found;
    __ add(__ top_most_monitor(), O2); // last one to check
    __ ba(entry);
    // Use Lscratch to hold the monitor element to check; start with the most
    // recent monitor. By using a local it survives the call to the C routine.
    __ delayed()->mov(Lmonitors, Lscratch);

    __ bind(loop);

    __ verify_oop(O4); // verify each monitor's oop
    __ cmp(O4, O0);    // check if current entry is for desired object
    __ brx(Assembler::equal, true, Assembler::pt, found);
    __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit

    __ inc(Lscratch, frame::interpreter_frame_monitor_size() * wordSize); // advance to next

    __ bind(entry);

    __ cmp(Lscratch, O2);
    __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, loop);
    __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    __ should_not_reach_here();

    __ bind(found);
  }
  __ unlock_object(O1);
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldub(Lbcp, 1, G3_scratch); // get next bc
  __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
  AddressLiteral ep(Interpreter::_wentry_point);
  __ set(ep, G4_scratch);
  __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
  __ jmp(G3_scratch, G0);
  __ delayed()->nop();
  // Note: the Lbcp increment step is part of the individual wide bytecode implementations
}


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  // put ndims * wordSize into Lscratch
  __ ldub(Lbcp, 3, Lscratch);
  __ sll(Lscratch, Interpreter::logStackElementSize, Lscratch);
  // Lesp points past the last dimension, so set O1 to the address of the first dimension
  __ add(Lesp, Lscratch, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
  __ add(Lesp, Lscratch, Lesp); // pop all dimensions off the stack
}
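
// For reference, a sketch (not generated code) of the operand layout assumed
// by multianewarray above: the expression stack lives in memory with Lesp
// pointing past the last dimension, and ndims is taken from the bytecode at
// Lbcp + 3, so:
//
//   first_dim_addr = Lesp + ndims * stack_element_size;  // passed in O1
//   Lesp          += ndims * stack_element_size;         // pop the dimensions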