/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP
#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val
// index can be noreg, in which case the simm13 offset is used instead.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        // G1 barrier needs uncompressed oop for region cross check.
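        // The post barrier filters out stores where the field address and
        // the new value lie in the same region; that comparison needs the
        // full uncompressed address, so an uncompressed copy of val is kept
        // below for g1_write_barrier_post.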
        Register new_val = val;
        if (UseCompressedOops && val != G0) {
          new_val = tmp;
          __ mov(val, new_val);
        }

        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, new_val, tmp);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();

  }
}


//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test Method* flag.
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
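      // (resolve_get_put stores the fast put_code into the cache entry only
      //  once the field is fully resolved; until then the zero loaded below
      //  makes us branch past the patch and stay on the slow path.)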
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
      __ set(bc, bc_reg);
      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    if (load_bc_into_bc_reg) {
      __ set(bc, bc_reg);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr( Otos_l1);
#endif
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt,
        isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);      // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ stop("string should be rewritten to fast_aldc");
  __ ba_short(exit);

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
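  // (The operand of fast_aldc was rewritten to an index into the constant
  //  pool's resolved-references array, which is what is loaded below.)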
  assert_different_registers(Otos_i, G3_scratch);
  __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size);  // load index => G3_scratch
  __ load_resolved_reference_at_index(Otos_i, G3_scratch);
  __ tst(Otos_i);
  __ br(Assembler::notEqual, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ call_VM(Otos_i, entry, O1);
  __ bind(resolved);
  __ verify_oop(Otos_i);
}


void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, exit;

  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool.  SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}


void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
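    // So: next == _iload      -> leave this one alone (pair not yet complete)
    //     next == _fast_iload -> rewrite this one to _fast_iload2
    //     next == _caload     -> rewrite this one to _fast_icaload
    //     anything else       -> rewrite this one to _fast_iload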
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
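  // For example, once a following getfield has been resolved and quickened
  // to _fast_agetfield, the next execution of this aload_0 rewrites itself
  // to _fast_aaccess_0, fusing the receiver load with the field load.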
  //
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}


void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}


void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);         // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);     // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short( Otos_i, Assembler::pn, is_null );

  __ load_klass(O3, O4);     // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i:    value
  // O1:        addr - offset
  // O2:        index
  // O3:        array
  // O4:        array element klass
  // O5:        value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check.  Branch to store_ok if no
  // failure.  Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
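  // (Both the normal and the null path below go through do_oop_store, so
  //  the GC write barriers are applied uniformly; the null path passes G0,
  //  which also lets do_oop_store skip the post barrier.)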
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(done);
  __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3* Interpreter::stackElementSize);     // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O2: index
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(O3, G4_scratch);
  __ ld(G4_scratch, in_bytes(Klass::layout_helper_offset()), G4_scratch);
  __ set(Klass::layout_helper_boolean_diffbit(), G3_scratch);
  __ andcc(G3_scratch, G4_scratch, G0);
  Label L_skip;
  __ br(Assembler::zero, false, Assembler::pn, L_skip);
  __ delayed()->nop();
  __ and3(Otos_i, 1, Otos_i);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));

}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch); // put a - like swap
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (long and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __  add(O1, Otos_i, Otos_i);  break;
   case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
     // %%%%% Mul may not exist: better to call .mul?
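     // (smul is fine here: V8 smul produces a 64-bit product with the low
     //  32 bits in the destination register, which is exactly the Java
     //  imul result; the high half in %y is simply ignored.)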
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
   case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
   case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __  add(O2, Otos_l, Otos_l);  break;
   case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __  and3(O3, Otos_l2, Otos_l2);  __ and3(O2, Otos_l1, Otos_l1);  break;
   case  _or:  __   or3(O3, Otos_l2, Otos_l2);  __  or3(O2, Otos_l1, Otos_l1);  break;
   case _xor:  __  xor3(O3, Otos_l2, Otos_l2);  __ xor3(O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  // %%%%% Later: For SPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1); // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);   // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
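  // (The min_int / -1 case was routed around sdiv above: the JLS requires
  //  the quotient to wrap back to min_int, which sdiv's overflow handling
  //  does not deliver.)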
  __ bind(done);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2); // save divisor
  idiv();                               // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif

}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}


void TemplateTable::lshl() {
  transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);                          // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lshr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2);                          // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}



void TemplateTable::lushr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2);                          // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case  add:  __  pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  sub:  __  pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  mul:  __  pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  div:  __  pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  rem:
     assert(Ftos_f == F0, "just checking");
#ifdef _LP64
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
     __ pop_i(O0);
     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
     __ ld( __ d_tmp, O1 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default:
     ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case  add:  __  pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  sub:  __  pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  mul:  __  pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  div:  __  pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  rem:
#ifdef _LP64
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
     __ pop_d( F0 );
#else
     // Pass arguments in O0O1, O2O3
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O2 );
     __ pop_d(Ftos_f);
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O0 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);    // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4,  O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);    // access_local_int puts E.A. in G3_scratch
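  // wide iinc layout: wide, iinc, indexbyte1, indexbyte2, constbyte1,
  // constbyte2; hence the unsigned two-byte index at bcp+2 and the signed
  // two-byte increment at bcp+4 above.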
}


void TemplateTable::convert() {
// %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif


  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (bytecode() == Bytecodes::_l2f) {
      __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
    } else {
      __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
    }
    break;

   case Bytecodes::_f2i: {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);                                     // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
#else
    __ pop_i( O0 );
    __ pop_i( O1 );
#endif
    __ call_VM_leaf(Lscratch,
        bytecode() == Bytecodes::_d2i
          ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
          : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

   case Bytecodes::_d2f:
    __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    break;

   default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

#ifdef _LP64
  __ pop_l(O1); // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
#else
  __ pop_l(O2); // cmp O2,3 to O0,1
  __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0  &&  Ftos_d == F0,  "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1,  G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1,  G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register l_cur_bcp = Lscratch;
  __ mov( Lbcp, l_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if ( increment_invocation_counter_for_backward_branches ) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false, Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp );     // add to bc addr

    const Register Rcounters = G3_scratch;
    __ get_method_counters(Lmethod, Rcounters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                   Assembler::notZero, &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in MethodCounters*
      __ bind(Lno_mdo);
      Address backedge_counter(Rcounters,
              in_bytes(MethodCounters::backedge_counter_offset()) +
              in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                 Assembler::notZero, &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ld(O0, nmethod::entry_bci_offset(), O2);
      __ cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
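      // (The restore above shifted the register window: the I0/I1 values
      //  set up a moment ago are now visible here as O0/O1.)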
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else {
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add( O1_disp, Lbcp, Lbcp );// add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos);
}


// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}



void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef _LP64
#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result.  The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
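  // The check below therefore only asserts that the loaded value is a
  // plausible BCI (below 64K) rather than a stray address that leaked into
  // the upper register half.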
  { Label zzz ;
     __ set (65536, G3_scratch) ;
     __ cmp (Otos_i, G3_scratch) ;
     __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
     __ delayed()->nop();
     __ stop("BCI is in the wrong register half?");
     __ bind (zzz) ;
  }
#endif
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);       // Low Byte
  __ ld(O1, 2 * BytesPerInt, O3);       // High Byte
#ifdef _LP64
  // Sign extend the 32 bits
  __ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */

  // check against lo & hi
  __ cmp( Otos_i, O2);
  __ br( Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp( Otos_i, O3 );
  __ br( Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2); // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
  __ ba(loop_entry);
  __ delayed()->add(O3, O2, O2); // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4); // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba_short(continue_execution);
  }

  // entry found -> get offset
  __ bind(found);
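  // (O4 already holds the matched pair's offset: the annulled delay-slot
  //  load in the loop above executes exactly when the branch to found is
  //  taken.)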
1869 if (ProfileInterpreter) { 1870 __ sub(O3, O1, O3); 1871 __ sub(O3, 2*BytesPerInt, O3); 1872 __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs 1873 __ profile_switch_case(O3, O1, O2, G3_scratch); 1874 1875 __ bind(continue_execution); 1876 } 1877 __ add(Lbcp, O4, Lbcp); 1878 __ dispatch_next(vtos); 1879 } 1880 1881 1882 void TemplateTable::fast_binaryswitch() { 1883 transition(itos, vtos); 1884 // Implementation using the following core algorithm: (copied from Intel) 1885 // 1886 // int binary_search(int key, LookupswitchPair* array, int n) { 1887 // // Binary search according to "Methodik des Programmierens" by 1888 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. 1889 // int i = 0; 1890 // int j = n; 1891 // while (i+1 < j) { 1892 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) 1893 // // with Q: for all i: 0 <= i < n: key < a[i] 1894 // // where a stands for the array and assuming that the (inexisting) 1895 // // element a[n] is infinitely big. 1896 // int h = (i + j) >> 1; 1897 // // i < h < j 1898 // if (key < array[h].fast_match()) { 1899 // j = h; 1900 // } else { 1901 // i = h; 1902 // } 1903 // } 1904 // // R: a[i] <= key < a[i+1] or Q 1905 // // (i.e., if key is within array, i is the correct index) 1906 // return i; 1907 // } 1908 1909 // register allocation 1910 assert(Otos_i == O0, "alias checking"); 1911 const Register Rkey = Otos_i; // already set (tosca) 1912 const Register Rarray = O1; 1913 const Register Ri = O2; 1914 const Register Rj = O3; 1915 const Register Rh = O4; 1916 const Register Rscratch = O5; 1917 1918 const int log_entry_size = 3; 1919 const int entry_size = 1 << log_entry_size; 1920 1921 Label found; 1922 // Find Array start 1923 __ add(Lbcp, 3 * BytesPerInt, Rarray); 1924 __ and3(Rarray, -BytesPerInt, Rarray); 1925 // initialize i & j (in delay slot) 1926 __ clr( Ri ); 1927 1928 // and start 1929 Label entry; 1930 __ ba(entry); 1931 __ delayed()->ld( Rarray, -BytesPerInt, Rj); 1932 // (Rj is already in the native byte-ordering.) 1933 1934 // binary search loop 1935 { Label loop; 1936 __ bind( loop ); 1937 // int h = (i + j) >> 1; 1938 __ sra( Rh, 1, Rh ); 1939 // if (key < array[h].fast_match()) { 1940 // j = h; 1941 // } else { 1942 // i = h; 1943 // } 1944 __ sll( Rh, log_entry_size, Rscratch ); 1945 __ ld( Rarray, Rscratch, Rscratch ); 1946 // (Rscratch is already in the native byte-ordering.) 1947 __ cmp( Rkey, Rscratch ); 1948 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match()) 1949 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match()) 1950 1951 // while (i+1 < j) 1952 __ bind( entry ); 1953 __ add( Ri, 1, Rscratch ); 1954 __ cmp(Rscratch, Rj); 1955 __ br( Assembler::less, true, Assembler::pt, loop ); 1956 __ delayed()->add( Ri, Rj, Rh ); // start h = i + j >> 1; 1957 } 1958 1959 // end of binary search, result index is i (must check again!) 1960 Label default_case; 1961 Label continue_execution; 1962 if (ProfileInterpreter) { 1963 __ mov( Ri, Rh ); // Save index in i for profiling 1964 } 1965 __ sll( Ri, log_entry_size, Ri ); 1966 __ ld( Rarray, Ri, Rscratch ); 1967 // (Rscratch is already in the native byte-ordering.) 
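  // The loop above only narrows the candidate index i; the key may not be
  // in the table at all, so the code below re-checks array[i] against the
  // key.  Roughly (a sketch; field names are illustrative only):
  //   if (key == array[i].match) bcp += array[i].offset;
  //   else                       bcp += default_offset;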
  __ cmp( Rkey, Rscratch );
  __ br( Assembler::notEqual, true, Assembler::pn, default_case );
  __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj );  // load default offset -> j

  // entry found -> j = offset
  __ inc( Ri, BytesPerInt );
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ ld( Rarray, Ri, Rj );
  // (Rj is already in the native byte-ordering.)

  if (ProfileInterpreter) {
    __ ba_short(continue_execution);
  }

  __ bind(default_case); // fall through (if not profiling)
  __ profile_switch_default(Ri);

  __ bind(continue_execution);
  __ add( Lbcp, Rj, Lbcp );
  __ dispatch_next( vtos );
}


void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(), "inconsistent calls_vm information");

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    __ mov(G0, G3_scratch);
    __ access_local_ptr(G3_scratch, Otos_i);
    __ load_klass(Otos_i, O2);
    __ set(JVM_ACC_HAS_FINALIZER, G3);
    __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
    __ andcc(G3, O2, G0);
    Label skip_register_finalizer;
    __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
    __ delayed()->nop();

    // Call out to do finalizer registration
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);

    __ bind(skip_register_finalizer);
  }

  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
  if (state == itos) {
    __ narrow(Otos_i);
  }
  __ remove_activation(state, /* throw_monitor_exception */ true);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals.  Undo that adjustment.
  __ ret();  // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);
}


// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPU's in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read.  It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
// that happen BEFORE the write float down to after the write.  It's OK for
// non-volatile memory refs that happen after the volatile write to float up
// before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
  // Helper function to insert an is-volatile test and memory barrier.
  // All current SPARC implementations run in TSO, needing only StoreLoad.
  if ((order_constraint & Assembler::StoreLoad) == 0) return;
  __ membar( order_constraint );
}

// ----------------------------------------------------------------------------
void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register Rcache,
                                            Register index,
                                            size_t index_size) {
  // Depends on cpCacheOop layout!
  Label resolved;

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
  __ cmp(Lbyte_code, (int) bytecode());  // have we resolved this bytecode?
  __ br(Assembler::equal, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry;
  switch (bytecode()) {
    case Bytecodes::_getstatic      : // fall through
    case Bytecodes::_putstatic      : // fall through
    case Bytecodes::_getfield       : // fall through
    case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
    case Bytecodes::_invokevirtual  : // fall through
    case Bytecodes::_invokespecial  : // fall through
    case Bytecodes::_invokestatic   : // fall through
    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
    case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
    default:
      fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
      break;
  }
  // first time invocation - must resolve first
  __ call_VM(noreg, entry, O1);
  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  __ bind(resolved);
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {
  // Uses both G3_scratch and G4_scratch
  Register cache = G3_scratch;
  Register index = G4_scratch;
  assert_different_registers(cache, method, itable_index);

  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
      ConstantPoolCache::base_offset() +
      ((byte_no == f2_byte)
       ?
ConstantPoolCacheEntry::f2_offset() 2115 : ConstantPoolCacheEntry::f1_offset() 2116 ) 2117 ); 2118 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() + 2119 ConstantPoolCacheEntry::flags_offset()); 2120 // access constant pool cache fields 2121 const int index_offset = in_bytes(ConstantPoolCache::base_offset() + 2122 ConstantPoolCacheEntry::f2_offset()); 2123 2124 if (is_invokevfinal) { 2125 __ get_cache_and_index_at_bcp(cache, index, 1); 2126 __ ld_ptr(Address(cache, method_offset), method); 2127 } else { 2128 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2)); 2129 resolve_cache_and_index(byte_no, cache, index, index_size); 2130 __ ld_ptr(Address(cache, method_offset), method); 2131 } 2132 2133 if (itable_index != noreg) { 2134 // pick up itable or appendix index from f2 also: 2135 __ ld_ptr(Address(cache, index_offset), itable_index); 2136 } 2137 __ ld_ptr(Address(cache, flags_offset), flags); 2138 } 2139 2140 // The Rcache register must be set before call 2141 void TemplateTable::load_field_cp_cache_entry(Register Robj, 2142 Register Rcache, 2143 Register index, 2144 Register Roffset, 2145 Register Rflags, 2146 bool is_static) { 2147 assert_different_registers(Rcache, Rflags, Roffset); 2148 2149 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2150 2151 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); 2152 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2153 if (is_static) { 2154 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj); 2155 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 2156 __ ld_ptr( Robj, mirror_offset, Robj); 2157 } 2158 } 2159 2160 // The registers Rcache and index expected to be set before call. 2161 // Correct values of the Rcache and index registers are preserved. 2162 void TemplateTable::jvmti_post_field_access(Register Rcache, 2163 Register index, 2164 bool is_static, 2165 bool has_tos) { 2166 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2167 2168 if (JvmtiExport::can_post_field_access()) { 2169 // Check to see if a field access watch has been set before we take 2170 // the time to call into the VM. 2171 Label Label1; 2172 assert_different_registers(Rcache, index, G1_scratch); 2173 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr()); 2174 __ load_contents(get_field_access_count_addr, G1_scratch); 2175 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1); 2176 2177 __ add(Rcache, in_bytes(cp_base_offset), Rcache); 2178 2179 if (is_static) { 2180 __ clr(Otos_i); 2181 } else { 2182 if (has_tos) { 2183 // save object pointer before call_VM() clobbers it 2184 __ push_ptr(Otos_i); // put object on tos where GC wants it. 
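        // Keeping the oop in a stack slot (rather than only in Otos_i)
        // keeps it visible to the GC maps across the call_VM below; it is
        // popped back into Otos_i afterwards.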
2185 } else { 2186 // Load top of stack (do not pop the value off the stack); 2187 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i); 2188 } 2189 __ verify_oop(Otos_i); 2190 } 2191 // Otos_i: object pointer or NULL if static 2192 // Rcache: cache entry pointer 2193 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), 2194 Otos_i, Rcache); 2195 if (!is_static && has_tos) { 2196 __ pop_ptr(Otos_i); // restore object pointer 2197 __ verify_oop(Otos_i); 2198 } 2199 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2200 __ bind(Label1); 2201 } 2202 } 2203 2204 void TemplateTable::getfield_or_static(int byte_no, bool is_static) { 2205 transition(vtos, vtos); 2206 2207 Register Rcache = G3_scratch; 2208 Register index = G4_scratch; 2209 Register Rclass = Rcache; 2210 Register Roffset= G4_scratch; 2211 Register Rflags = G1_scratch; 2212 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2213 2214 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2)); 2215 jvmti_post_field_access(Rcache, index, is_static, false); 2216 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); 2217 2218 if (!is_static) { 2219 pop_and_check_object(Rclass); 2220 } else { 2221 __ verify_oop(Rclass); 2222 } 2223 2224 Label exit; 2225 2226 Assembler::Membar_mask_bits membar_bits = 2227 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2228 2229 if (__ membar_has_effect(membar_bits)) { 2230 // Get volatile flag 2231 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2232 __ and3(Rflags, Lscratch, Lscratch); 2233 } 2234 2235 Label checkVolatile; 2236 2237 // compute field type 2238 Label notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj; 2239 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2240 // Make sure we don't need to mask Rflags after the above shift 2241 ConstantPoolCacheEntry::verify_tos_state_shift(); 2242 2243 // Check atos before itos for getstatic, more likely (in Queens at least) 2244 __ cmp(Rflags, atos); 2245 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2246 __ delayed() ->cmp(Rflags, itos); 2247 2248 // atos 2249 __ load_heap_oop(Rclass, Roffset, Otos_i); 2250 __ verify_oop(Otos_i); 2251 __ push(atos); 2252 if (!is_static) { 2253 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch); 2254 } 2255 __ ba(checkVolatile); 2256 __ delayed()->tst(Lscratch); 2257 2258 __ bind(notObj); 2259 2260 // cmp(Rflags, itos); 2261 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2262 __ delayed() ->cmp(Rflags, ltos); 2263 2264 // itos 2265 __ ld(Rclass, Roffset, Otos_i); 2266 __ push(itos); 2267 if (!is_static) { 2268 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch); 2269 } 2270 __ ba(checkVolatile); 2271 __ delayed()->tst(Lscratch); 2272 2273 __ bind(notInt); 2274 2275 // cmp(Rflags, ltos); 2276 __ br(Assembler::notEqual, false, Assembler::pt, notLong); 2277 __ delayed() ->cmp(Rflags, btos); 2278 2279 // ltos 2280 // load must be atomic 2281 __ ld_long(Rclass, Roffset, Otos_l); 2282 __ push(ltos); 2283 if (!is_static) { 2284 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch); 2285 } 2286 __ ba(checkVolatile); 2287 __ delayed()->tst(Lscratch); 2288 2289 __ bind(notLong); 2290 2291 // cmp(Rflags, btos); 2292 __ br(Assembler::notEqual, false, Assembler::pt, notByte); 2293 __ delayed() ->cmp(Rflags, ztos); 2294 2295 // btos 2296 __ ldsb(Rclass, Roffset, Otos_i); 2297 __ push(itos); 2298 if (!is_static) { 
2299 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch); 2300 } 2301 __ ba(checkVolatile); 2302 __ delayed()->tst(Lscratch); 2303 2304 __ bind(notByte); 2305 2306 // cmp(Rflags, ztos); 2307 __ br(Assembler::notEqual, false, Assembler::pt, notBool); 2308 __ delayed() ->cmp(Rflags, ctos); 2309 2310 // ztos 2311 __ ldsb(Rclass, Roffset, Otos_i); 2312 __ push(itos); 2313 if (!is_static) { 2314 // use btos rewriting, no truncating to t/f bit is needed for getfield. 2315 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch); 2316 } 2317 __ ba(checkVolatile); 2318 __ delayed()->tst(Lscratch); 2319 2320 __ bind(notBool); 2321 2322 // cmp(Rflags, ctos); 2323 __ br(Assembler::notEqual, false, Assembler::pt, notChar); 2324 __ delayed() ->cmp(Rflags, stos); 2325 2326 // ctos 2327 __ lduh(Rclass, Roffset, Otos_i); 2328 __ push(itos); 2329 if (!is_static) { 2330 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch); 2331 } 2332 __ ba(checkVolatile); 2333 __ delayed()->tst(Lscratch); 2334 2335 __ bind(notChar); 2336 2337 // cmp(Rflags, stos); 2338 __ br(Assembler::notEqual, false, Assembler::pt, notShort); 2339 __ delayed() ->cmp(Rflags, ftos); 2340 2341 // stos 2342 __ ldsh(Rclass, Roffset, Otos_i); 2343 __ push(itos); 2344 if (!is_static) { 2345 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch); 2346 } 2347 __ ba(checkVolatile); 2348 __ delayed()->tst(Lscratch); 2349 2350 __ bind(notShort); 2351 2352 2353 // cmp(Rflags, ftos); 2354 __ br(Assembler::notEqual, false, Assembler::pt, notFloat); 2355 __ delayed() ->tst(Lscratch); 2356 2357 // ftos 2358 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f); 2359 __ push(ftos); 2360 if (!is_static) { 2361 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch); 2362 } 2363 __ ba(checkVolatile); 2364 __ delayed()->tst(Lscratch); 2365 2366 __ bind(notFloat); 2367 2368 2369 // dtos 2370 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d); 2371 __ push(dtos); 2372 if (!is_static) { 2373 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch); 2374 } 2375 2376 __ bind(checkVolatile); 2377 if (__ membar_has_effect(membar_bits)) { 2378 // __ tst(Lscratch); executed in delay slot 2379 __ br(Assembler::zero, false, Assembler::pt, exit); 2380 __ delayed()->nop(); 2381 volatile_barrier(membar_bits); 2382 } 2383 2384 __ bind(exit); 2385 } 2386 2387 2388 void TemplateTable::getfield(int byte_no) { 2389 getfield_or_static(byte_no, false); 2390 } 2391 2392 void TemplateTable::getstatic(int byte_no) { 2393 getfield_or_static(byte_no, true); 2394 } 2395 2396 2397 void TemplateTable::fast_accessfield(TosState state) { 2398 transition(atos, state); 2399 Register Rcache = G3_scratch; 2400 Register index = G4_scratch; 2401 Register Roffset = G4_scratch; 2402 Register Rflags = Rcache; 2403 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2404 2405 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2406 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true); 2407 2408 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2409 2410 __ null_check(Otos_i); 2411 __ verify_oop(Otos_i); 2412 2413 Label exit; 2414 2415 Assembler::Membar_mask_bits membar_bits = 2416 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2417 if (__ membar_has_effect(membar_bits)) { 2418 // Get volatile flag 2419 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags); 2420 __ set((1 << 
ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2421 } 2422 2423 switch (bytecode()) { 2424 case Bytecodes::_fast_bgetfield: 2425 __ ldsb(Otos_i, Roffset, Otos_i); 2426 break; 2427 case Bytecodes::_fast_cgetfield: 2428 __ lduh(Otos_i, Roffset, Otos_i); 2429 break; 2430 case Bytecodes::_fast_sgetfield: 2431 __ ldsh(Otos_i, Roffset, Otos_i); 2432 break; 2433 case Bytecodes::_fast_igetfield: 2434 __ ld(Otos_i, Roffset, Otos_i); 2435 break; 2436 case Bytecodes::_fast_lgetfield: 2437 __ ld_long(Otos_i, Roffset, Otos_l); 2438 break; 2439 case Bytecodes::_fast_fgetfield: 2440 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f); 2441 break; 2442 case Bytecodes::_fast_dgetfield: 2443 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d); 2444 break; 2445 case Bytecodes::_fast_agetfield: 2446 __ load_heap_oop(Otos_i, Roffset, Otos_i); 2447 break; 2448 default: 2449 ShouldNotReachHere(); 2450 } 2451 2452 if (__ membar_has_effect(membar_bits)) { 2453 __ btst(Lscratch, Rflags); 2454 __ br(Assembler::zero, false, Assembler::pt, exit); 2455 __ delayed()->nop(); 2456 volatile_barrier(membar_bits); 2457 __ bind(exit); 2458 } 2459 2460 if (state == atos) { 2461 __ verify_oop(Otos_i); // does not blow flags! 2462 } 2463 } 2464 2465 void TemplateTable::jvmti_post_fast_field_mod() { 2466 if (JvmtiExport::can_post_field_modification()) { 2467 // Check to see if a field modification watch has been set before we take 2468 // the time to call into the VM. 2469 Label done; 2470 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr()); 2471 __ load_contents(get_field_modification_count_addr, G4_scratch); 2472 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done); 2473 __ pop_ptr(G4_scratch); // copy the object pointer from tos 2474 __ verify_oop(G4_scratch); 2475 __ push_ptr(G4_scratch); // put the object pointer back on tos 2476 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1); 2477 // Save tos values before call_VM() clobbers them. Since we have 2478 // to do it for every data type, we use the saved values as the 2479 // jvalue object. 
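    // The value re-pushed onto the expression stack below doubles as the
    // jvalue handed to the JVMTI callback; conceptually (a sketch only,
    // for the int-sized case):
    //   jvalue v; v.i = Otos_i;
    //   InterpreterRuntime::post_field_modification(obj, cache_entry, &v);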
    switch (bytecode()) {  // save tos values before call_VM() clobbers them
    case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
    // get words in right order for use as jvalue object
    case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G3_scratch);  __ inc(G3_scratch, wordSize);
    // G4_scratch: object pointer
    // G1_scratch: cache entry pointer
    // G3_scratch: jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
    switch (bytecode()) {  // restore tos values
    case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
    case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
    }
    __ bind(done);
  }
}

// The registers Rcache and index are expected to be set before the call.
// The function may destroy various registers, just not the Rcache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G1_scratch);
    __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);

    // The Rcache and index registers have already been set.  That would
    // allow us to eliminate this call, but then the Rcache and index
    // registers would have to be used consistently after this line.
    __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);

    __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
    if (is_static) {
      // Life is simple.  Null out the object pointer.
      __ clr(G4_scratch);
    } else {
      Register Rflags = G1_scratch;
      // Life is harder.  The stack holds the value on top, followed by the
      // object.  We don't know the size of the value, though; it could be
      // one or two words depending on its type.  As a result, we must find
      // the type to determine where the object is.
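      // Expression stack at this point, value on top (sketch):
      //   one-word value:  ... [obj] [value]          -> obj at Lesp + expr_offset_in_bytes(1)
      //   two-word value:  ... [obj] [word2] [word1]  -> obj at Lesp + expr_offset_in_bytes(2)
      // which is exactly the two cases the code below distinguishes.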
2542 2543 Label two_word, valsizeknown; 2544 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); 2545 __ mov(Lesp, G4_scratch); 2546 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2547 // Make sure we don't need to mask Rflags after the above shift 2548 ConstantPoolCacheEntry::verify_tos_state_shift(); 2549 __ cmp(Rflags, ltos); 2550 __ br(Assembler::equal, false, Assembler::pt, two_word); 2551 __ delayed()->cmp(Rflags, dtos); 2552 __ br(Assembler::equal, false, Assembler::pt, two_word); 2553 __ delayed()->nop(); 2554 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1)); 2555 __ ba_short(valsizeknown); 2556 __ bind(two_word); 2557 2558 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2)); 2559 2560 __ bind(valsizeknown); 2561 // setup object pointer 2562 __ ld_ptr(G4_scratch, 0, G4_scratch); 2563 __ verify_oop(G4_scratch); 2564 } 2565 // setup pointer to jvalue object 2566 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize); 2567 // G4_scratch: object pointer or NULL if static 2568 // G3_scratch: cache entry pointer 2569 // G1_scratch: jvalue object on the stack 2570 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), 2571 G4_scratch, G3_scratch, G1_scratch); 2572 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2573 __ bind(Label1); 2574 } 2575 } 2576 2577 void TemplateTable::pop_and_check_object(Register r) { 2578 __ pop_ptr(r); 2579 __ null_check(r); // for field access must check obj. 2580 __ verify_oop(r); 2581 } 2582 2583 void TemplateTable::putfield_or_static(int byte_no, bool is_static) { 2584 transition(vtos, vtos); 2585 Register Rcache = G3_scratch; 2586 Register index = G4_scratch; 2587 Register Rclass = Rcache; 2588 Register Roffset= G4_scratch; 2589 Register Rflags = G1_scratch; 2590 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2591 2592 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2)); 2593 jvmti_post_field_mod(Rcache, index, is_static); 2594 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); 2595 2596 Assembler::Membar_mask_bits read_bits = 2597 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); 2598 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; 2599 2600 Label notVolatile, checkVolatile, exit; 2601 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { 2602 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2603 __ and3(Rflags, Lscratch, Lscratch); 2604 2605 if (__ membar_has_effect(read_bits)) { 2606 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); 2607 volatile_barrier(read_bits); 2608 __ bind(notVolatile); 2609 } 2610 } 2611 2612 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2613 // Make sure we don't need to mask Rflags after the above shift 2614 ConstantPoolCacheEntry::verify_tos_state_shift(); 2615 2616 // compute field type 2617 Label notInt, notShort, notChar, notObj, notByte, notBool, notLong, notFloat; 2618 2619 if (is_static) { 2620 // putstatic with object type most likely, check that first 2621 __ cmp(Rflags, atos); 2622 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2623 __ delayed()->cmp(Rflags, itos); 2624 2625 // atos 2626 { 2627 __ pop_ptr(); 2628 __ verify_oop(Otos_i); 2629 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2630 __ ba(checkVolatile); 2631 __ delayed()->tst(Lscratch); 2632 } 2633 2634 __ bind(notObj); 2635 // cmp(Rflags, 
itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, btos);

    // itos
    {
      __ pop_i();
      __ st(Otos_i, Rclass, Roffset);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
  } else {
    // putfield with int type most likely, check that first
    __ cmp(Rflags, itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, atos);

    // itos
    {
      __ pop_i();
      pop_and_check_object(Rclass);
      __ st(Otos_i, Rclass, Roffset);
      patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
    // cmp(Rflags, atos);
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed()->cmp(Rflags, btos);

    // atos
    {
      __ pop_ptr();
      pop_and_check_object(Rclass);
      __ verify_oop(Otos_i);
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
      patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notObj);
  }

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed()->cmp(Rflags, ztos);

  // btos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notByte);

  // cmp(Rflags, ztos);
  __ br(Assembler::notEqual, false, Assembler::pt, notBool);
  __ delayed()->cmp(Rflags, ltos);

  // ztos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ and3(Otos_i, 1, Otos_i);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_zputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notBool);
  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed()->cmp(Rflags, ctos);

  // ltos
  {
    __ pop_l();
    if (!is_static) pop_and_check_object(Rclass);
    __ st_long(Otos_l, Rclass, Roffset);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notLong);
  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed()->cmp(Rflags, stos);

  // ctos (char)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notChar);
  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed()->cmp(Rflags, ftos);

  // stos (short)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static) {
patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no); 2764 } 2765 __ ba(checkVolatile); 2766 __ delayed()->tst(Lscratch); 2767 } 2768 2769 __ bind(notShort); 2770 // cmp(Rflags, ftos); 2771 __ br(Assembler::notZero, false, Assembler::pt, notFloat); 2772 __ delayed()->nop(); 2773 2774 // ftos 2775 { 2776 __ pop_f(); 2777 if (!is_static) pop_and_check_object(Rclass); 2778 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); 2779 if (!is_static) { 2780 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no); 2781 } 2782 __ ba(checkVolatile); 2783 __ delayed()->tst(Lscratch); 2784 } 2785 2786 __ bind(notFloat); 2787 2788 // dtos 2789 { 2790 __ pop_d(); 2791 if (!is_static) pop_and_check_object(Rclass); 2792 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); 2793 if (!is_static) { 2794 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no); 2795 } 2796 } 2797 2798 __ bind(checkVolatile); 2799 __ tst(Lscratch); 2800 2801 if (__ membar_has_effect(write_bits)) { 2802 // __ tst(Lscratch); in delay slot 2803 __ br(Assembler::zero, false, Assembler::pt, exit); 2804 __ delayed()->nop(); 2805 volatile_barrier(Assembler::StoreLoad); 2806 __ bind(exit); 2807 } 2808 } 2809 2810 void TemplateTable::fast_storefield(TosState state) { 2811 transition(state, vtos); 2812 Register Rcache = G3_scratch; 2813 Register Rclass = Rcache; 2814 Register Roffset= G4_scratch; 2815 Register Rflags = G1_scratch; 2816 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2817 2818 jvmti_post_fast_field_mod(); 2819 2820 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1); 2821 2822 Assembler::Membar_mask_bits read_bits = 2823 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); 2824 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; 2825 2826 Label notVolatile, checkVolatile, exit; 2827 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { 2828 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); 2829 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2830 __ and3(Rflags, Lscratch, Lscratch); 2831 if (__ membar_has_effect(read_bits)) { 2832 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); 2833 volatile_barrier(read_bits); 2834 __ bind(notVolatile); 2835 } 2836 } 2837 2838 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2839 pop_and_check_object(Rclass); 2840 2841 switch (bytecode()) { 2842 case Bytecodes::_fast_zputfield: __ and3(Otos_i, 1, Otos_i); // fall through to bputfield 2843 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break; 2844 case Bytecodes::_fast_cputfield: /* fall through */ 2845 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break; 2846 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break; 2847 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break; 2848 case Bytecodes::_fast_fputfield: 2849 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); 2850 break; 2851 case Bytecodes::_fast_dputfield: 2852 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); 2853 break; 2854 case Bytecodes::_fast_aputfield: 2855 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2856 break; 2857 default: 2858 ShouldNotReachHere(); 2859 } 2860 2861 if (__ membar_has_effect(write_bits)) { 2862 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit); 
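  // Falling through here means the field is volatile.  Under SPARC TSO only
  // the volatile-store -> volatile-load case can be reordered by the
  // hardware, so a single StoreLoad fence after the store suffices.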
2863 volatile_barrier(Assembler::StoreLoad); 2864 __ bind(exit); 2865 } 2866 } 2867 2868 2869 void TemplateTable::putfield(int byte_no) { 2870 putfield_or_static(byte_no, false); 2871 } 2872 2873 void TemplateTable::putstatic(int byte_no) { 2874 putfield_or_static(byte_no, true); 2875 } 2876 2877 2878 void TemplateTable::fast_xaccess(TosState state) { 2879 transition(vtos, state); 2880 Register Rcache = G3_scratch; 2881 Register Roffset = G4_scratch; 2882 Register Rflags = G4_scratch; 2883 Register Rreceiver = Lscratch; 2884 2885 __ ld_ptr(Llocals, 0, Rreceiver); 2886 2887 // access constant pool cache (is resolved) 2888 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2); 2889 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset); 2890 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp 2891 2892 __ verify_oop(Rreceiver); 2893 __ null_check(Rreceiver); 2894 if (state == atos) { 2895 __ load_heap_oop(Rreceiver, Roffset, Otos_i); 2896 } else if (state == itos) { 2897 __ ld (Rreceiver, Roffset, Otos_i) ; 2898 } else if (state == ftos) { 2899 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f); 2900 } else { 2901 ShouldNotReachHere(); 2902 } 2903 2904 Assembler::Membar_mask_bits membar_bits = 2905 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2906 if (__ membar_has_effect(membar_bits)) { 2907 2908 // Get is_volatile value in Rflags and check if membar is needed 2909 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags); 2910 2911 // Test volatile 2912 Label notVolatile; 2913 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2914 __ btst(Rflags, Lscratch); 2915 __ br(Assembler::zero, false, Assembler::pt, notVolatile); 2916 __ delayed()->nop(); 2917 volatile_barrier(membar_bits); 2918 __ bind(notVolatile); 2919 } 2920 2921 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__); 2922 __ sub(Lbcp, 1, Lbcp); 2923 } 2924 2925 //---------------------------------------------------------------------------------------------------- 2926 // Calls 2927 2928 void TemplateTable::count_calls(Register method, Register temp) { 2929 // implemented elsewhere 2930 ShouldNotReachHere(); 2931 } 2932 2933 void TemplateTable::prepare_invoke(int byte_no, 2934 Register method, // linked method (or i-klass) 2935 Register ra, // return address 2936 Register index, // itable index, MethodType, etc. 
2937 Register recv, // if caller wants to see it 2938 Register flags // if caller wants to test it 2939 ) { 2940 // determine flags 2941 const Bytecodes::Code code = bytecode(); 2942 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 2943 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 2944 const bool is_invokehandle = code == Bytecodes::_invokehandle; 2945 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 2946 const bool is_invokespecial = code == Bytecodes::_invokespecial; 2947 const bool load_receiver = (recv != noreg); 2948 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 2949 assert(recv == noreg || recv == O0, ""); 2950 assert(flags == noreg || flags == O1, ""); 2951 2952 // setup registers & access constant pool cache 2953 if (recv == noreg) recv = O0; 2954 if (flags == noreg) flags = O1; 2955 const Register temp = O2; 2956 assert_different_registers(method, ra, index, recv, flags, temp); 2957 2958 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); 2959 2960 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 2961 2962 // maybe push appendix to arguments 2963 if (is_invokedynamic || is_invokehandle) { 2964 Label L_no_push; 2965 __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp); 2966 __ btst(flags, temp); 2967 __ br(Assembler::zero, false, Assembler::pt, L_no_push); 2968 __ delayed()->nop(); 2969 // Push the appendix as a trailing parameter. 2970 // This must be done before we get the receiver, 2971 // since the parameter_size includes it. 2972 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0"); 2973 __ load_resolved_reference_at_index(temp, index); 2974 __ verify_oop(temp); 2975 __ push_ptr(temp); // push appendix (MethodType, CallSite, etc.) 
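    // The appendix is an extra trailing argument that is invisible in the
    // bytecode signature: for invokedynamic/invokehandle it is the CallSite
    // or MethodType loaded from resolved_references, and it is already
    // counted in the parameter_size bits of the flags word.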
2976 __ bind(L_no_push); 2977 } 2978 2979 // load receiver if needed (after appendix is pushed so parameter size is correct) 2980 if (load_receiver) { 2981 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size 2982 __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp 2983 __ verify_oop(recv); 2984 } 2985 2986 // compute return type 2987 __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra); 2988 // Make sure we don't need to mask flags after the above shift 2989 ConstantPoolCacheEntry::verify_tos_state_shift(); 2990 // load return address 2991 { 2992 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); 2993 AddressLiteral table(table_addr); 2994 __ set(table, temp); 2995 __ sll(ra, LogBytesPerWord, ra); 2996 __ ld_ptr(Address(temp, ra), ra); 2997 } 2998 } 2999 3000 3001 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) { 3002 Register Rcall = Rindex; 3003 assert_different_registers(Rcall, G5_method, Gargs, Rret); 3004 3005 // get target Method* & entry point 3006 __ lookup_virtual_method(Rrecv, Rindex, G5_method); 3007 __ profile_arguments_type(G5_method, Rcall, Gargs, true); 3008 __ call_from_interpreter(Rcall, Gargs, Rret); 3009 } 3010 3011 void TemplateTable::invokevirtual(int byte_no) { 3012 transition(vtos, vtos); 3013 assert(byte_no == f2_byte, "use this argument"); 3014 3015 Register Rscratch = G3_scratch; 3016 Register Rtemp = G4_scratch; 3017 Register Rret = Lscratch; 3018 Register O0_recv = O0; 3019 Label notFinal; 3020 3021 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false); 3022 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 3023 3024 // Check for vfinal 3025 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch); 3026 __ btst(Rret, G4_scratch); 3027 __ br(Assembler::zero, false, Assembler::pt, notFinal); 3028 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters 3029 3030 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp); 3031 3032 invokevfinal_helper(Rscratch, Rret); 3033 3034 __ bind(notFinal); 3035 3036 __ mov(G5_method, Rscratch); // better scratch register 3037 __ load_receiver(G4_scratch, O0_recv); // gets receiverOop 3038 // receiver is in O0_recv 3039 __ verify_oop(O0_recv); 3040 3041 // get return address 3042 AddressLiteral table(Interpreter::invoke_return_entry_table()); 3043 __ set(table, Rtemp); 3044 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type 3045 // Make sure we don't need to mask Rret after the above shift 3046 ConstantPoolCacheEntry::verify_tos_state_shift(); 3047 __ sll(Rret, LogBytesPerWord, Rret); 3048 __ ld_ptr(Rtemp, Rret, Rret); // get return address 3049 3050 // get receiver klass 3051 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); 3052 __ load_klass(O0_recv, O0_recv); 3053 __ verify_klass_ptr(O0_recv); 3054 3055 __ profile_virtual_call(O0_recv, O4); 3056 3057 generate_vtable_call(O0_recv, Rscratch, Rret); 3058 } 3059 3060 void TemplateTable::fast_invokevfinal(int byte_no) { 3061 transition(vtos, vtos); 3062 assert(byte_no == f2_byte, "use this argument"); 3063 3064 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true, 3065 /*is_invokevfinal*/true, false); 3066 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 3067 invokevfinal_helper(G3_scratch, Lscratch); 3068 } 3069 3070 void TemplateTable::invokevfinal_helper(Register Rscratch, 
Register Rret) { 3071 Register Rtemp = G4_scratch; 3072 3073 // Load receiver from stack slot 3074 __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch); 3075 __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch); 3076 __ load_receiver(G4_scratch, O0); 3077 3078 // receiver NULL check 3079 __ null_check(O0); 3080 3081 __ profile_final_call(O4); 3082 __ profile_arguments_type(G5_method, Rscratch, Gargs, true); 3083 3084 // get return address 3085 AddressLiteral table(Interpreter::invoke_return_entry_table()); 3086 __ set(table, Rtemp); 3087 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type 3088 // Make sure we don't need to mask Rret after the above shift 3089 ConstantPoolCacheEntry::verify_tos_state_shift(); 3090 __ sll(Rret, LogBytesPerWord, Rret); 3091 __ ld_ptr(Rtemp, Rret, Rret); // get return address 3092 3093 3094 // do the call 3095 __ call_from_interpreter(Rscratch, Gargs, Rret); 3096 } 3097 3098 3099 void TemplateTable::invokespecial(int byte_no) { 3100 transition(vtos, vtos); 3101 assert(byte_no == f1_byte, "use this argument"); 3102 3103 const Register Rret = Lscratch; 3104 const Register O0_recv = O0; 3105 const Register Rscratch = G3_scratch; 3106 3107 prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check 3108 __ null_check(O0_recv); 3109 3110 // do the call 3111 __ profile_call(O4); 3112 __ profile_arguments_type(G5_method, Rscratch, Gargs, false); 3113 __ call_from_interpreter(Rscratch, Gargs, Rret); 3114 } 3115 3116 3117 void TemplateTable::invokestatic(int byte_no) { 3118 transition(vtos, vtos); 3119 assert(byte_no == f1_byte, "use this argument"); 3120 3121 const Register Rret = Lscratch; 3122 const Register Rscratch = G3_scratch; 3123 3124 prepare_invoke(byte_no, G5_method, Rret); // get f1 Method* 3125 3126 // do the call 3127 __ profile_call(O4); 3128 __ profile_arguments_type(G5_method, Rscratch, Gargs, false); 3129 __ call_from_interpreter(Rscratch, Gargs, Rret); 3130 } 3131 3132 void TemplateTable::invokeinterface_object_method(Register RKlass, 3133 Register Rcall, 3134 Register Rret, 3135 Register Rflags) { 3136 Register Rscratch = G4_scratch; 3137 Register Rindex = Lscratch; 3138 3139 assert_different_registers(Rscratch, Rindex, Rret); 3140 3141 Label notFinal; 3142 3143 // Check for vfinal 3144 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch); 3145 __ btst(Rflags, Rscratch); 3146 __ br(Assembler::zero, false, Assembler::pt, notFinal); 3147 __ delayed()->nop(); 3148 3149 __ profile_final_call(O4); 3150 3151 // do the call - the index (f2) contains the Method* 3152 assert_different_registers(G5_method, Gargs, Rcall); 3153 __ mov(Rindex, G5_method); 3154 __ profile_arguments_type(G5_method, Rcall, Gargs, true); 3155 __ call_from_interpreter(Rcall, Gargs, Rret); 3156 __ bind(notFinal); 3157 3158 __ profile_virtual_call(RKlass, O4); 3159 generate_vtable_call(RKlass, Rindex, Rret); 3160 } 3161 3162 3163 void TemplateTable::invokeinterface(int byte_no) { 3164 transition(vtos, vtos); 3165 assert(byte_no == f1_byte, "use this argument"); 3166 3167 const Register Rinterface = G1_scratch; 3168 const Register Rret = G3_scratch; 3169 const Register Rindex = Lscratch; 3170 const Register O0_recv = O0; 3171 const Register O1_flags = O1; 3172 const Register O2_Klass = O2; 3173 const Register Rscratch = G4_scratch; 3174 assert_different_registers(Rscratch, G5_method); 3175 3176 prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags); 3177 
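  // Register state after prepare_invoke (summary): Rinterface holds the
  // resolved interface Klass*, Rindex the itable method index, O0_recv the
  // receiver, O1_flags the cp-cache flags, and Rret the return address.
  // The itable search below is roughly (pseudocode; helper names approximate):
  //   for (ioe = first_itable_offset_entry(recv_klass); ; ioe++) {
  //     if (ioe->interface() == NULL) throw IncompatibleClassChangeError;
  //     if (ioe->interface() == Rinterface) break;
  //   }
  //   method = itable_method_entry(recv_klass, ioe->offset(), Rindex)->method();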
  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O2_Klass);

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCacheOop.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant java compiler.
  Label notMethod;
  __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
  __ btst(O1_flags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notMethod);
  __ delayed()->nop();

  invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);

  __ bind(notMethod);

  __ profile_virtual_call(O2_Klass, O4);

  //
  // find entry point to call
  //

  // compute start of first itableOffsetEntry (which is at the end of the vtable)
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  Label search;
  Register Rtemp = O1_flags;

  __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp);
  if (align_object_offset(1) > 1) {
    __ round_to(Rtemp, align_object_offset(1));
  }
  __ sll(Rtemp, LogBytesPerWord, Rtemp);  // Rtemp *= wordSize
  if (Assembler::is_simm13(base)) {
    __ add(Rtemp, base, Rtemp);
  } else {
    __ set(base, Rscratch);
    __ add(Rscratch, Rtemp, Rtemp);
  }
  __ add(O2_Klass, Rtemp, Rscratch);

  __ bind(search);

  __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
  {
    Label ok;

    // Check that entry is non-null.  Null entries are probably a bytecode
    // problem.  If the interface isn't implemented by the receiver class,
    // the VM should throw IncompatibleClassChangeError.  linkResolver checks
    // this too but that's only if the entry isn't already resolved, so we
    // need to check again.
    __ br_notnull_short( Rtemp, Assembler::pt, ok);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
    __ should_not_reach_here();
    __ bind(ok);
  }

  __ cmp(Rinterface, Rtemp);
  __ brx(Assembler::notEqual, true, Assembler::pn, search);
  __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);

  // entry found and Rscratch points to it
  __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);

  assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
  __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex);  // Rindex *= 8
  __ add(Rscratch, Rindex, Rscratch);
  __ ld_ptr(O2_Klass, Rscratch, G5_method);

  // Check for abstract method error.
3250 { 3251 Label ok; 3252 __ br_notnull_short(G5_method, Assembler::pt, ok); 3253 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); 3254 __ should_not_reach_here(); 3255 __ bind(ok); 3256 } 3257 3258 Register Rcall = Rinterface; 3259 assert_different_registers(Rcall, G5_method, Gargs, Rret); 3260 3261 __ profile_arguments_type(G5_method, Rcall, Gargs, true); 3262 __ call_from_interpreter(Rcall, Gargs, Rret); 3263 } 3264 3265 void TemplateTable::invokehandle(int byte_no) { 3266 transition(vtos, vtos); 3267 assert(byte_no == f1_byte, "use this argument"); 3268 3269 if (!EnableInvokeDynamic) { 3270 // rewriter does not generate this bytecode 3271 __ should_not_reach_here(); 3272 return; 3273 } 3274 3275 const Register Rret = Lscratch; 3276 const Register G4_mtype = G4_scratch; 3277 const Register O0_recv = O0; 3278 const Register Rscratch = G3_scratch; 3279 3280 prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv); 3281 __ null_check(O0_recv); 3282 3283 // G4: MethodType object (from cpool->resolved_references[f1], if necessary) 3284 // G5: MH.invokeExact_MT method (from f2) 3285 3286 // Note: G4_mtype is already pushed (if necessary) by prepare_invoke 3287 3288 // do the call 3289 __ verify_oop(G4_mtype); 3290 __ profile_final_call(O4); // FIXME: profile the LambdaForm also 3291 __ profile_arguments_type(G5_method, Rscratch, Gargs, true); 3292 __ call_from_interpreter(Rscratch, Gargs, Rret); 3293 } 3294 3295 3296 void TemplateTable::invokedynamic(int byte_no) { 3297 transition(vtos, vtos); 3298 assert(byte_no == f1_byte, "use this argument"); 3299 3300 if (!EnableInvokeDynamic) { 3301 // We should not encounter this bytecode if !EnableInvokeDynamic. 3302 // The verifier will stop it. However, if we get past the verifier, 3303 // this will stop the thread in a reasonable way, without crashing the JVM. 3304 __ call_VM(noreg, CAST_FROM_FN_PTR(address, 3305 InterpreterRuntime::throw_IncompatibleClassChangeError)); 3306 // the call_VM checks for exception, so we should never return here. 
    __ should_not_reach_here();
    return;
  }

  const Register Rret        = Lscratch;
  const Register G4_callsite = G4_scratch;
  const Register Rscratch    = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_callsite);

  // G4: CallSite object (from cpool->resolved_references[f1])
  // G5: MH.linkToCallSite method (from f2)

  // Note:  G4_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(O4);

  // do the call
  __ verify_oop(G4_callsite);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


//----------------------------------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);

  Label slow_case;
  Label done;
  Label initialize_header;
  Label initialize_object;  // including clearing the fields

  Register RallocatedObject = Otos_i;
  Register RinstanceKlass = O1;
  Register Roffset = O3;
  Register Rscratch = O4;

  __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(Rscratch, G3_scratch);
  // make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the order
  // in which the constant pool is updated (see ConstantPool::klass_at_put).
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
  // get InstanceKlass
  //__ sll(Roffset, LogBytesPerWord, Roffset);        // executed in delay slot
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ ld_ptr(Rscratch, Roffset, RinstanceKlass);

  // make sure klass is fully initialized:
  __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
  __ cmp(G3_scratch, InstanceKlass::fully_initialized);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // get instance_size in InstanceKlass (already aligned)
  //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // make sure the klass does not have a finalizer, and is not abstract,
  // an interface, or java/lang/Class
  __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
  __ br(Assembler::notZero, false, Assembler::pn, slow_case);
  __ delayed()->nop();

  // allocate the instance
  // 1) Try to allocate in the TLAB
  // 2) if that fails, and the TLAB is not yet full enough to discard,
  //    allocate in the shared Eden
  // 3) if the above fails (or is not applicable), go to a slow case
  //    (creates a new TLAB, etc.)
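  // TLAB fast path 1) in pseudocode (a sketch, ignoring alignment details):
  //   new_top = thread->tlab_top + instance_size;
  //   if (new_top <= thread->tlab_end) {
  //     obj = thread->tlab_top; thread->tlab_top = new_top;   // no CAS needed
  //   } else if (refill_waste_limit >= tlab_free) {
  //     goto slow_case;                    // discard and refill the TLAB
  //   } else {
  //     refill_waste_limit += increment;   // then try the shared-Eden CAS loop
  //   }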
  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;

  if (UseTLAB) {
    Register RoldTopValue = RallocatedObject;
    Register RtlabWasteLimitValue = G3_scratch;
    Register RnewTopValue = G1_scratch;
    Register RendValue = Rscratch;
    Register RfreeValue = RnewTopValue;

    // check if we can allocate in the TLAB
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
    __ add(RoldTopValue, Roffset, RnewTopValue);

    // if there is enough space, we do not CAS and do not clear
    __ cmp(RnewTopValue, RendValue);
    if (ZeroTLAB) {
      // the fields have already been cleared
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
    } else {
      // initialize both the header and fields
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
    }
    __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));

    if (allow_shared_alloc) {
      // Check if tlab should be discarded (refill_waste_limit >= free)
      __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
      __ sub(RendValue, RoldTopValue, RfreeValue);
#ifdef _LP64
      __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
#else
      __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
#endif
      __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small

      // increment waste limit to prevent getting stuck on this slow path
      __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
      __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
    } else {
      // No allocation in the shared eden.
      __ ba_short(slow_case);
    }
  }

  // Allocation in the shared Eden
  if (allow_shared_alloc) {
    Register RoldTopValue = G1_scratch;
    Register RtopAddr = G3_scratch;
    Register RnewTopValue = RallocatedObject;
    Register RendValue = Rscratch;

    __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);

    Label retry;
    __ bind(retry);
    __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
    __ ld_ptr(RendValue, 0, RendValue);
    __ ld_ptr(RtopAddr, 0, RoldTopValue);
    __ add(RoldTopValue, Roffset, RnewTopValue);

    // RnewTopValue contains the top address after the new object
    // has been allocated.
    Label retry;
    __ bind(retry);
    __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
    __ ld_ptr(RendValue, 0, RendValue);
    __ ld_ptr(RtopAddr, 0, RoldTopValue);
    __ add(RoldTopValue, Roffset, RnewTopValue);

    // RnewTopValue contains the top address after the new object
    // has been allocated.
    __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);

    __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);

    // if someone beat us to the allocation, try again, otherwise continue
    __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);

    // bump total bytes allocated by this thread
    // RoldTopValue and RtopAddr are dead, so we can use G1 and G3
    __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
  }

  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
    // clear object fields
    __ bind(initialize_object);
    __ deccc(Roffset, sizeof(oopDesc));
    __ br(Assembler::zero, false, Assembler::pt, initialize_header);
    __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);

    // initialize remaining object fields
    if (UseBlockZeroing) {
      // Use BIS for zeroing
      __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
    } else {
      Label loop;
      __ subcc(Roffset, wordSize, Roffset);
      __ bind(loop);
      //__ subcc(Roffset, wordSize, Roffset);  // executed above loop or in delay slot
      __ st_ptr(G0, G3_scratch, Roffset);
      __ br(Assembler::notEqual, false, Assembler::pt, loop);
      __ delayed()->subcc(Roffset, wordSize, Roffset);
    }
    __ ba_short(initialize_header);
  }

  // slow case
  __ bind(slow_case);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
  __ get_constant_pool(O1);

  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);

  __ ba_short(done);

  // Initialize the header: mark, klass
  __ bind(initialize_header);

  if (UseBiasedLocking) {
    __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
  } else {
    __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
  }
  __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
  __ store_klass_gap(G0, RallocatedObject);                                 // klass gap if compressed
  __ store_klass(RinstanceKlass, RallocatedObject);                         // klass (last for cms)

  {
    SkipIfEqual skip_if(
      _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
    // Trigger dtrace event
    __ push(atos);
    __ call_VM_leaf(noreg,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
    __ pop(atos);
  }

  // continue
  __ bind(done);
}


void TemplateTable::newarray() {
  transition(itos, atos);
  __ ldub(Lbcp, 1, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
}


void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_constant_pool(O1);
  __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
}


void TemplateTable::arraylength() {
  transition(atos, itos);
  Label ok;
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_1_x( Assembler::notZero, ok );
  __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
  __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
}
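

// Illustrative sketch only: checkcast (and instanceof further below) first test
// whether the constant pool entry has been quickened, i.e. already resolved to
// a Klass*. In C-like pseudocode (hypothetical names):
//
//   if (ref == NULL) { /* checkcast passes; instanceof yields 0 */ }
//   Klass* k = (tags[index] == JVM_CONSTANT_Class)
//                ? resolved_klass_at(cpool, index)   // quickened: load directly
//                : quicken_io_cc();                  // runtime call resolves and patches
//   // ...followed by a fast subtype check of ref->klass() against k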
void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, quicked, cast_ok, resolved;
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for casting a NULL
  __ br_null_short(Otos_i, Assembler::pn, is_null);

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the checkcast has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check.  Branch to cast_ok if no
  // failure.  Throw an exception on failure.
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );

  // Not a subtype, so we must throw an exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );

  __ bind(cast_ok);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}
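

// Illustrative note: instanceof below resolves its target class exactly as
// checkcast above does; only the outcome of the subtype test differs. A sketch
// with hypothetical names:
//
//   Otos_i = 1;                                     // optimistically assume success
//   if (!is_subtype(ref->klass(), k)) Otos_i = 0;   // fall through rather than throw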
void TemplateTable::instanceof() {
  Label done, is_null, quicked, resolved;
  transition(atos, itos);
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for a NULL object
  __ br_null_short(Otos_i, Assembler::pt, is_null);

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the instanceof has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ get_constant_pool(Lscratch);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check.  Branch to done if no
  // failure.  Return 0 on failure.
  __ or3(G0, 1, Otos_i);      // set result assuming quick tests succeed
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
  // Not a subtype; return 0
  __ clr( Otos_i );

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}

void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping;
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single-step mode.

  transition(vtos, vtos);
  // get the unpatched byte code
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
  __ mov(O0, Lbyte_code);

  // post the breakpoint event
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);

  // complete the execution of the original bytecode
  __ dispatch_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // This works because the exception is cached in Otos_i, which is the same
  // as O0, which is what throw_exception_entry expects.
  assert(Otos_i == Oexception, "see explanation above");

  __ verify_oop(Otos_i);
  __ null_check(Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
}


//----------------------------------------------------------------------------------------------------
// Synchronization


// See frame_sparc.hpp for monitor block layout.
// Monitor elements are dynamically allocated by growing the stack as needed.

void TemplateTable::monitorenter() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  // Try to acquire a lock on the object.
  // Repeat until successful (i.e., until monitorenter returns true).

  { Label ok;
    __ tst(Otos_i);
    __ throw_if_not_1_x( Assembler::notZero, ok);
    __ delayed()->mov(Otos_i, Lscratch); // save obj
    __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  }

  assert(O0 == Otos_i, "Be sure where the object to lock is");

  // find a free slot in the monitor block
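  //
  // Illustrative sketch only: the loop below scans the in-frame monitor block
  // from the most recent entry (Lmonitors) to the oldest (top_most_monitor()),
  // remembering an unused slot and stopping early if it finds an entry that
  // already refers to our object. In C-like pseudocode (hypothetical names):
  //
  //   BasicObjectLock* free = NULL;
  //   for (BasicObjectLock* m = newest; m <= oldest; m++) {
  //     if (m->obj() == NULL) free = m;   // remember a reusable slot
  //     if (m->obj() == obj) break;       // re-lock of the same object
  //   }
  //   if (free == NULL) free = grow_monitor_block();  // push a new slot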

  // initialize entry pointer
  __ clr(O1); // points to free slot or NULL

  {
    Label entry, loop, exit;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba( entry );
    __ delayed()->mov( Lmonitors, O3 );  // first one to check


    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ tst(O4);        // is this entry unused?
    __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);

    __ cmp(O4, O0); // check if current entry is for same object
    __ brx( Assembler::equal, false, Assembler::pn, exit );
    __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one

    __ bind( entry );

    __ cmp( O3, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);

    __ bind( exit );
  }

  { Label allocated;

    // found free slot?
    __ br_notnull_short(O1, Assembler::pn, allocated);

    __ add_monitor_to_stack( false, O2, O3 );
    __ mov(Lmonitors, O1);

    __ bind(allocated);
  }

  // Increment bcp to point to the next bytecode, so exception handling for async exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ inc(Lbcp);

  __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
  __ lock_object(O1, O0);

  // check if there's enough space on the stack for the monitors after locking
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );

  assert(O0 == Otos_i, "just checking");

  { Label entry, loop, found;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba(entry);
    // Use Lscratch to hold the monitor element to check; start with the most
    // recent monitor. Using a local register lets it survive the call into the C routine.
    __ delayed()->mov( Lmonitors, Lscratch );

    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ cmp(O4, O0);    // check if current entry is for desired object
    __ brx( Assembler::equal, true, Assembler::pt, found );
    __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit

    __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next

    __ bind( entry );

    __ cmp( Lscratch, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    __ should_not_reach_here();

    __ bind(found);
  }
  __ unlock_object(O1);
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldub(Lbcp, 1, G3_scratch); // get next bc
  __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
  AddressLiteral ep(Interpreter::_wentry_point);
  __ set(ep, G4_scratch);
  __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
  __ jmp(G3_scratch, G0);
  __ delayed()->nop();
  // Note: the Lbcp increment step is part of the individual wide bytecode implementations
}


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  // put ndims * wordSize into Lscratch
  __ ldub( Lbcp, 3, Lscratch);
  __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
  // Lesp points past the last_dim, so set O1 to the first_dim address
  __ add( Lesp, Lscratch, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
  __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
}
#endif /* !CC_INTERP */