/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP
#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val.
// index can be noreg.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        // G1 barrier needs uncompressed oop for region cross check.
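        // (Assumption based on the G1 post-barrier contract: it compares the
        //  store address with the new value to detect cross-region stores, so
        //  it needs the full uncompressed oop; hence the copy kept in tmp when
        //  compressed oops are in use.)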
        Register new_val = val;
        if (UseCompressedOops && val != G0) {
          new_val = tmp;
          __ mov(val, new_val);
        }

        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, new_val, tmp);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::Epsilon:
      if (index == noreg) {
        assert(Assembler::is_simm13(offset), "fix this code");
        __ store_heap_oop(val, base, offset);
      } else {
        __ store_heap_oop(val, base, index);
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}


//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test Method* flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
      __ set(bc, bc_reg);
      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    if (load_bc_into_bc_reg) {
      __ set(bc, bc_reg);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr( Otos_l1);
#endif
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1,
                               G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);      // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ stop("string should be rewritten to fast_aldc");
  __ ba_short(exit);

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
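  // (The resolved-reference entries live in a Java object array hanging off
  //  the ConstantPool, so a still-unresolved slot reads back as null.)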
  assert_different_registers(Otos_i, G3_scratch);
  __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size);  // load index => G3_scratch
  __ load_resolved_reference_at_index(Otos_i, G3_scratch);
  __ tst(Otos_i);
  __ br(Assembler::notEqual, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ call_VM(Otos_i, entry, O1);
  __ bind(resolved);
  __ verify_oop(Otos_i);
}


void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, exit;

  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}


void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
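    // Worked example: in "iload x; iload y; iadd", the second iload is
    // quickened first (its successor is the iadd), becoming _fast_iload;
    // on a later execution the first iload then sees _fast_iload as its
    // successor and is rewritten to _fast_iload2, which loads both locals.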
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
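  // For example, once a following getfield has been quickened to
  // _fast_igetfield, the preceding _aload_0 is rewritten to the single
  // pair bytecode _fast_iaccess_0.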
  //
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}


void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}


void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);      // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);  // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short( Otos_i, Assembler::pn, is_null );

  __ load_klass(O3, O4);     // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i:    value
  // O1:        addr - offset
  // O2:        index
  // O3:        array
  // O4:        array element klass
  // O5:        value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check. Branch to store_ok if no
  // failure. Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
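  // (do_oop_store adds the array header offset itself; precise == true so
  //  the card mark covers the exact element address, as required for
  //  object arrays.)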
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize); // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize); // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O2: index
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(O3, G4_scratch);
  __ ld(G4_scratch, in_bytes(Klass::layout_helper_offset()), G4_scratch);
  __ set(Klass::layout_helper_boolean_diffbit(), G3_scratch);
  __ andcc(G3_scratch, G4_scratch, G0);
  Label L_skip;
  __ br(Assembler::zero, false, Assembler::pn, L_skip);
  __ delayed()->nop();
  __ and3(Otos_i, 1, Otos_i); // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (longs and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __ add(O1, Otos_i, Otos_i);  break;
   case  sub:  __ sub(O1, Otos_i, Otos_i);  break;
     // %%%%% Mul may not exist: better to call .mul?
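     // (Only pre-V8 SPARC lacked hardware integer multiply and needed the
     //  .mul library routine; on the V8/V9 parts this interpreter targets,
     //  the smul below should be fine.)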
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
   case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
   case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __ add(O2, Otos_l, Otos_l);  break;
   case  sub:  __ sub(O2, Otos_l, Otos_l);  break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __  and3(O3, Otos_l2, Otos_l2);  __ and3(O2, Otos_l1, Otos_l1);  break;
   case  _or:  __   or3(O3, Otos_l2, Otos_l2);  __  or3(O2, Otos_l1, Otos_l1);  break;
   case _xor:  __  xor3(O3, Otos_l2, Otos_l2);  __ xor3(O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1); // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i); // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
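  // (The special case filtered out above is min_int / -1, which would
  //  overflow sdiv; its result is defined to be min_int, so the dividend
  //  is passed through unchanged.)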
  __ bind(done);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2); // save divisor
  idiv();             // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}


void TemplateTable::lshl() {
  transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lshr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}



void TemplateTable::lushr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case  add:  __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  sub:  __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  mul:  __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  div:  __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  rem:
     assert(Ftos_f == F0, "just checking");
#ifdef _LP64
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
     __ pop_i(O0);
     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
     __ ld( __ d_tmp, O1 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default:
     ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case  add:  __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  sub:  __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  mul:  __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  div:  __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  rem:
#ifdef _LP64
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2);
     __ pop_d(F0);
#else
     // Pass arguments in O0O1, O2O3
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O2 );
     __ pop_d(Ftos_f);
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O0 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2); // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif


  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (bytecode() == Bytecodes::_l2f) {
      __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
    } else {
      __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
    }
    break;

   case Bytecodes::_f2i: {
     Label isNaN;
     // result must be 0 if value is NaN; test by comparing value to itself
     __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
     __ fb(Assembler::f_unordered, true,
           Assembler::pn, isNaN);
     __ delayed()->clr(Otos_i); // NaN
     __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
     __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
     __ ld(__ d_tmp, Otos_i);
     __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
#else
    __ pop_i( O0 );
    __ pop_i( O1 );
#endif
    __ call_VM_leaf(Lscratch,
        bytecode() == Bytecodes::_d2i
          ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
          : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

   case Bytecodes::_d2f:
    __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    break;

   default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

#ifdef _LP64
  __ pop_l(O1); // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
#else
  __ pop_l(O2); // cmp O2,3 to O0,1
  __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register l_cur_bcp = Lscratch;
  __ mov( Lbcp, l_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if ( increment_invocation_counter_for_backward_branches ) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false, Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr

    const Register Rcounters = G3_scratch;
    __ get_method_counters(Lmethod, Rcounters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                   Assembler::notZero, &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in MethodCounters*
      __ bind(Lno_mdo);
      Address backedge_counter(Rcounters,
              in_bytes(MethodCounters::backedge_counter_offset()) +
              in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                 Assembler::notZero, &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ld(O0, nmethod::entry_bci_offset(), O2);
      __ cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
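      // Note: the restore above rotated the register window, so the nmethod
      // stashed in I1 is addressable here as O1.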
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else {
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add( O1_disp, Lbcp, Lbcp ); // add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos);
}


// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}



void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef _LP64
#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result. The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
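  // The check below asserts that the BCI fits in 16 bits (methods are capped
  // at 64K bytecodes), i.e. that no stale bits linger in the upper half of
  // the loaded 64-bit value.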
  { Label zzz;
    __ set(65536, G3_scratch);
    __ cmp(Otos_i, G3_scratch);
    __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
    __ delayed()->nop();
    __ stop("BCI is in the wrong register half?");
    __ bind(zzz);
  }
#endif
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2); // low bound
  __ ld(O1, 2 * BytesPerInt, O3); // high bound
#ifdef _LP64
  // Sign extend the 32 bits
  __ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */

  // check against lo & hi
  __ cmp( Otos_i, O2);
  __ br( Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp( Otos_i, O3 );
  __ br( Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2); // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3);    // set first pair addr
  __ ba(loop_entry);
  __ delayed()->add(O3, O2, O2);      // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4); // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba_short(continue_execution);
  }

  // entry found -> get offset
  __ bind(found);
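  // (O4 already holds the matching pair's offset: it was loaded in the
  //  annulled delay slot of the taken branch above.)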

void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  assert(Otos_i == O0, "alias checking");
  const Register Rkey     = Otos_i;  // already set (tosca)
  const Register Rarray   = O1;
  const Register Ri       = O2;
  const Register Rj       = O3;
  const Register Rh       = O4;
  const Register Rscratch = O5;

  const int log_entry_size = 3;
  const int entry_size = 1 << log_entry_size;

  Label found;
  // Find Array start
  __ add(Lbcp, 3 * BytesPerInt, Rarray);
  __ and3(Rarray, -BytesPerInt, Rarray);
  // initialize i & j (in delay slot)
  __ clr(Ri);

  // and start
  Label entry;
  __ ba(entry);
  __ delayed()->ld(Rarray, -BytesPerInt, Rj);
  // (Rj is already in the native byte-ordering.)

  // binary search loop
  { Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ sra(Rh, 1, Rh);
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sll(Rh, log_entry_size, Rscratch);
    __ ld(Rarray, Rscratch, Rscratch);
    // (Rscratch is already in the native byte-ordering.)
    __ cmp(Rkey, Rscratch);
    __ movcc(Assembler::less,         false, Assembler::icc, Rh, Rj);  // j = h if (key <  array[h].fast_match())
    __ movcc(Assembler::greaterEqual, false, Assembler::icc, Rh, Ri);  // i = h if (key >= array[h].fast_match())

    // while (i+1 < j)
    __ bind(entry);
    __ add(Ri, 1, Rscratch);
    __ cmp(Rscratch, Rj);
    __ br(Assembler::less, true, Assembler::pt, loop);
    __ delayed()->add(Ri, Rj, Rh);  // start h = i + j >> 1;
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mov(Ri, Rh);  // Save index in i for profiling
  }
  __ sll(Ri, log_entry_size, Ri);
  __ ld(Rarray, Ri, Rscratch);
  // (Rscratch is already in the native byte-ordering.)
  __ cmp(Rkey, Rscratch);
  __ br(Assembler::notEqual, true, Assembler::pn, default_case);
  __ delayed()->ld(Rarray, -2 * BytesPerInt, Rj);  // load default offset -> j

  // entry found -> j = offset
  __ inc(Ri, BytesPerInt);
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ ld(Rarray, Ri, Rj);
  // (Rj is already in the native byte-ordering.)

  if (ProfileInterpreter) {
    __ ba_short(continue_execution);
  }

  __ bind(default_case);  // fall through (if not profiling)
  __ profile_switch_default(Ri);

  __ bind(continue_execution);
  __ add(Lbcp, Rj, Lbcp);
  __ dispatch_next(vtos);
}
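
// For reference, the movcc pair above implements the loop body branchlessly;
// in illustrative C++ (not generated code):
//
//   while (i + 1 < j) {
//     int h = (i + j) >> 1;              // add in the delay slot, then sra
//     int m = array[h].match;            // ld(Rarray, h << log_entry_size)
//     if (key <  m) j = h;               // movcc(less, ..., Rh, Rj)
//     if (key >= m) i = h;               // movcc(greaterEqual, ..., Rh, Ri)
//   }
//
// Exactly one of the two conditional moves fires per iteration, so no branch
// is needed inside the loop body.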

void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(), "inconsistent calls_vm information");

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    __ mov(G0, G3_scratch);
    __ access_local_ptr(G3_scratch, Otos_i);
    __ load_klass(Otos_i, O2);
    __ set(JVM_ACC_HAS_FINALIZER, G3);
    __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
    __ andcc(G3, O2, G0);
    Label skip_register_finalizer;
    __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
    __ delayed()->nop();

    // Call out to do finalizer registration
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);

    __ bind(skip_register_finalizer);
  }

  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
  if (state == itos) {
    __ narrow(Otos_i);
  }
  __ remove_activation(state, /* throw_monitor_exception */ true);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();  // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);
}


// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
//     the read float up to before the read.  It's OK for non-volatile memory
//     refs that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory
//     refs that happen BEFORE the write float down to after the write.  It's
//     OK for non-volatile memory refs that happen after the volatile write to
//     float up before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
  // Helper function to insert an is-volatile test and memory barrier
  // All current sparc implementations run in TSO, needing only StoreLoad
  if ((order_constraint & Assembler::StoreLoad) == 0) return;
  __ membar(order_constraint);
}
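
// Illustrative Java example (not from this file) of the
// volatile-store-volatile-load case that the trailing StoreLoad covers:
//
//   volatile int x = 0, y = 0;
//   // Thread 1:  x = 1;  r1 = y;        Thread 2:  y = 1;  r2 = x;
//
// Without a StoreLoad barrier between each volatile store and the following
// volatile load, both r1 == 0 and r2 == 0 could be observed, which
// requirement (1) above forbids.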

// ----------------------------------------------------------------------------
void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register Rcache,
                                            Register index,
                                            size_t index_size) {
  // Depends on cpCacheOop layout!
  Label resolved;

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
  __ cmp(Lbyte_code, (int) bytecode());  // have we resolved this bytecode?
  __ br(Assembler::equal, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry;
  switch (bytecode()) {
    case Bytecodes::_getstatic      : // fall through
    case Bytecodes::_putstatic      : // fall through
    case Bytecodes::_getfield       : // fall through
    case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
    case Bytecodes::_invokevirtual  : // fall through
    case Bytecodes::_invokespecial  : // fall through
    case Bytecodes::_invokestatic   : // fall through
    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
    case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
    default:
      fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
      break;
  }
  // first time invocation - must resolve first
  __ call_VM(noreg, entry, O1);
  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  __ bind(resolved);
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {
  // Uses both G3_scratch and G4_scratch
  Register cache = G3_scratch;
  Register index = G4_scratch;
  assert_different_registers(cache, method, itable_index);

  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
      ConstantPoolCache::base_offset() +
      ((byte_no == f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()
      )
    );
  const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  if (is_invokevfinal) {
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ ld_ptr(Address(cache, method_offset), method);
  } else {
    size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
    resolve_cache_and_index(byte_no, cache, index, index_size);
    __ ld_ptr(Address(cache, method_offset), method);
  }

  if (itable_index != noreg) {
    // pick up itable or appendix index from f2 also:
    __ ld_ptr(Address(cache, index_offset), itable_index);
  }
  __ ld_ptr(Address(cache, flags_offset), flags);
}
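
// Informal summary of the ConstantPoolCacheEntry fields read above (see
// cpCache.hpp for the authoritative layout):
//   f1    - e.g. the resolved Method* or Klass*
//   f2    - vtable/itable index, appendix index, or the Method* itself when
//           the call site is vfinal
//   flags - tos state, parameter size, and bits such as is_volatile,
//           is_vfinal, and has_appendix
// This is why is_invokevirtual must agree with byte_no == f2_byte above.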

// The Rcache register must be set before call
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register index,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static) {
  assert_different_registers(Rcache, Rflags, Roffset);

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  if (is_static) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ld_ptr(Robj, mirror_offset, Robj);
  }
}

// The registers Rcache and index are expected to be set before call.
// Correct values of the Rcache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register Rcache,
                                            Register index,
                                            bool is_static,
                                            bool has_tos) {
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
    __ load_contents(get_field_access_count_addr, G1_scratch);
    __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);

    __ add(Rcache, in_bytes(cp_base_offset), Rcache);

    if (is_static) {
      __ clr(Otos_i);
    } else {
      if (has_tos) {
        // save object pointer before call_VM() clobbers it
        __ push_ptr(Otos_i);  // put object on tos where GC wants it.
      } else {
        // Load top of stack (do not pop the value off the stack);
        __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
      }
      __ verify_oop(Otos_i);
    }
    // Otos_i: object pointer or NULL if static
    // Rcache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
               Otos_i, Rcache);
    if (!is_static && has_tos) {
      __ pop_ptr(Otos_i);  // restore object pointer
      __ verify_oop(Otos_i);
    }
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  transition(vtos, vtos);

  Register Rcache = G3_scratch;
  Register index  = G4_scratch;
  Register Rclass = Rcache;
  Register Roffset= G4_scratch;
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
  jvmti_post_field_access(Rcache, index, is_static, false);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  if (!is_static) {
    pop_and_check_object(Rclass);
  } else {
    __ verify_oop(Rclass);
  }

  Label exit;

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);

  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
  }

  Label checkVolatile;

  // compute field type
  Label notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // Check atos before itos for getstatic, more likely (in Queens at least)
  __ cmp(Rflags, atos);
  __ br(Assembler::notEqual, false, Assembler::pt, notObj);
  __ delayed()->cmp(Rflags, itos);

  // atos
  __ load_heap_oop(Rclass, Roffset, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notObj);

  // cmp(Rflags, itos);
  __ br(Assembler::notEqual, false, Assembler::pt, notInt);
  __ delayed()->cmp(Rflags, ltos);

  // itos
  __ ld(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notInt);

  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed()->cmp(Rflags, btos);

  // ltos
  // load must be atomic
  __ ld_long(Rclass, Roffset, Otos_l);
  __ push(ltos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notLong);

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed()->cmp(Rflags, ztos);

  // btos
  __ ldsb(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notByte);

  // cmp(Rflags, ztos);
  __ br(Assembler::notEqual, false, Assembler::pt, notBool);
  __ delayed()->cmp(Rflags, ctos);

  // ztos
  __ ldsb(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static) {
    // use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notBool);

  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed()->cmp(Rflags, stos);

  // ctos
  __ lduh(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notChar);

  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed()->cmp(Rflags, ftos);

  // stos
  __ ldsh(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notShort);


  // cmp(Rflags, ftos);
  __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
  __ delayed()->tst(Lscratch);

  // ftos
  __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
  __ push(ftos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notFloat);


  // dtos
  __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
  __ push(dtos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
  }

  __ bind(checkVolatile);
  if (__ membar_has_effect(membar_bits)) {
    // __ tst(Lscratch); executed in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
  }

  __ bind(exit);
}


void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}
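
// The compare-and-branch chain in getfield_or_static above is, in effect
// (illustrative C++ only, not generated code):
//
//   switch (Rflags >> tos_state_shift) {
//     case atos: push(load_heap_oop(obj + offset)); break;
//     case itos: push(ld(obj + offset));            break;
//     ...                                           // one case per tos state
//   }
//
// with each comparison for the next case hoisted into the branch-delay slot
// of the previous one.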

void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);
  Register Rcache  = G3_scratch;
  Register index   = G4_scratch;
  Register Roffset = G4_scratch;
  Register Rflags  = Rcache;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  __ get_cache_and_index_at_bcp(Rcache, index, 1);
  jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);

  __ null_check(Otos_i);
  __ verify_oop(Otos_i);

  Label exit;

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag (note: this must read the flags word, not f2,
    // since Rflags is tested against the is_volatile bit below)
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
  }

  switch (bytecode()) {
    case Bytecodes::_fast_bgetfield:
      __ ldsb(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_cgetfield:
      __ lduh(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_sgetfield:
      __ ldsh(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_igetfield:
      __ ld(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_lgetfield:
      __ ld_long(Otos_i, Roffset, Otos_l);
      break;
    case Bytecodes::_fast_fgetfield:
      __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
      break;
    case Bytecodes::_fast_dgetfield:
      __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
      break;
    case Bytecodes::_fast_agetfield:
      __ load_heap_oop(Otos_i, Roffset, Otos_i);
      break;
    default:
      ShouldNotReachHere();
  }

  if (__ membar_has_effect(membar_bits)) {
    __ btst(Lscratch, Rflags);
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
    __ bind(exit);
  }

  if (state == atos) {
    __ verify_oop(Otos_i);  // does not blow flags!
  }
}

void TemplateTable::jvmti_post_fast_field_mod() {
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label done;
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G4_scratch);
    __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
    __ pop_ptr(G4_scratch);   // copy the object pointer from tos
    __ verify_oop(G4_scratch);
    __ push_ptr(G4_scratch);  // put the object pointer back on tos
    __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
    // Save tos values before call_VM() clobbers them. Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) {  // save tos values before call_VM() clobbers them
      case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
      case Bytecodes::_fast_bputfield: // fall through
      case Bytecodes::_fast_zputfield: // fall through
      case Bytecodes::_fast_sputfield: // fall through
      case Bytecodes::_fast_cputfield: // fall through
      case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
      case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
      case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
      // get words in right order for use as jvalue object
      case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G3_scratch);  __ inc(G3_scratch, wordSize);
    // G4_scratch:  object pointer
    // G1_scratch:  cache entry pointer
    // G3_scratch:  jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
    switch (bytecode()) {  // restore tos values
      case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
      case Bytecodes::_fast_bputfield: // fall through
      case Bytecodes::_fast_zputfield: // fall through
      case Bytecodes::_fast_sputfield: // fall through
      case Bytecodes::_fast_cputfield: // fall through
      case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
      case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
      case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
      case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
    }
    __ bind(done);
  }
}
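
// Illustrative picture (a sketch, not generated code) of the expression stack
// just before the post_field_modification call above, with the stack growing
// toward lower addresses:
//
//   Lesp            ->  (next free slot, one word below top-of-stack)
//   Lesp + wordSize ->  [ saved tos value, 1 or 2 words ]  <- passed as jvalue*
//                       [ object pointer                ]
//
// The saved copy of the value doubles as the jvalue object handed to the VM.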

// The registers Rcache and index are expected to be set before call.
// The function may destroy various registers, just not the Rcache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G1_scratch);
    __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);

    // The Rcache and index registers have already been set.
    // This would allow us to eliminate this call, but the Rcache and index
    // registers would then have to be used consistently after this line.
    __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);

    __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
    if (is_static) {
      // Life is simple.  Null out the object pointer.
      __ clr(G4_scratch);
    } else {
      Register Rflags = G1_scratch;
      // Life is harder.  The stack holds the value on top, followed by the
      // object.  We don't know the size of the value, though; it could be
      // one or two words depending on its type. As a result, we must find
      // the type to determine where the object is.

      Label two_word, valsizeknown;
      __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
      __ mov(Lesp, G4_scratch);
      __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
      // Make sure we don't need to mask Rflags after the above shift
      ConstantPoolCacheEntry::verify_tos_state_shift();
      __ cmp(Rflags, ltos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->cmp(Rflags, dtos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->nop();
      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
      __ ba_short(valsizeknown);
      __ bind(two_word);

      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));

      __ bind(valsizeknown);
      // setup object pointer
      __ ld_ptr(G4_scratch, 0, G4_scratch);
      __ verify_oop(G4_scratch);
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G1_scratch);  __ inc(G1_scratch, wordSize);
    // G4_scratch:  object pointer or NULL if static
    // G3_scratch:  cache entry pointer
    // G1_scratch:  jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
               G4_scratch, G3_scratch, G1_scratch);
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}

void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  transition(vtos, vtos);
  Register Rcache = G3_scratch;
  Register index  = G4_scratch;
  Register Rclass = Rcache;
  Register Roffset= G4_scratch;
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
  jvmti_post_field_mod(Rcache, index, is_static);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);

    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // compute field type
  Label notInt, notShort, notChar, notObj, notByte, notBool, notLong, notFloat;

  if (is_static) {
    // putstatic with object type most likely, check that first
    __ cmp(Rflags, atos);
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed()->cmp(Rflags, itos);

    // atos
    {
      __ pop_ptr();
      __ verify_oop(Otos_i);
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notObj);
    // cmp(Rflags, itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, btos);

    // itos
    {
      __ pop_i();
      __ st(Otos_i, Rclass, Roffset);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
  } else {
    // putfield with int type most likely, check that first
    __ cmp(Rflags, itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, atos);

    // itos
    {
      __ pop_i();
      pop_and_check_object(Rclass);
      __ st(Otos_i, Rclass, Roffset);
      patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
    // cmp(Rflags, atos);
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed()->cmp(Rflags, btos);

    // atos
    {
      __ pop_ptr();
      pop_and_check_object(Rclass);
      __ verify_oop(Otos_i);
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
      patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notObj);
  }

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed()->cmp(Rflags, ztos);

  // btos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notByte);

  // cmp(Rflags, ztos);
  __ br(Assembler::notEqual, false, Assembler::pt, notBool);
  __ delayed()->cmp(Rflags, ltos);

  // ztos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ and3(Otos_i, 1, Otos_i);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_zputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notBool);
  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed()->cmp(Rflags, ctos);

  // ltos
  {
    __ pop_l();
    if (!is_static) pop_and_check_object(Rclass);
    __ st_long(Otos_l, Rclass, Roffset);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notLong);
  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed()->cmp(Rflags, stos);

  // ctos (char)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notChar);
  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed()->cmp(Rflags, ftos);

  // stos (short)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notShort);
  // cmp(Rflags, ftos);
  __ br(Assembler::notZero, false, Assembler::pt, notFloat);
  __ delayed()->nop();

  // ftos
  {
    __ pop_f();
    if (!is_static) pop_and_check_object(Rclass);
    __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notFloat);

  // dtos
  {
    __ pop_d();
    if (!is_static) pop_and_check_object(Rclass);
    __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
    }
  }

  __ bind(checkVolatile);
  __ tst(Lscratch);

  if (__ membar_has_effect(write_bits)) {
    // __ tst(Lscratch); in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);
  Register Rcache = G3_scratch;
  Register Rclass = Rcache;
  Register Roffset= G4_scratch;
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  jvmti_post_fast_field_mod();

  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  pop_and_check_object(Rclass);

  switch (bytecode()) {
    case Bytecodes::_fast_zputfield: __ and3(Otos_i, 1, Otos_i);  // fall through to bputfield
    case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
    case Bytecodes::_fast_cputfield: /* fall through */
    case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
    case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
    case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
    case Bytecodes::_fast_fputfield:
      __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
      break;
    case Bytecodes::_fast_dputfield:
      __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
      break;
    case Bytecodes::_fast_aputfield:
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
      break;
    default:
      ShouldNotReachHere();
  }

  if (__ membar_has_effect(write_bits)) {
    __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}


void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);
  Register Rcache    = G3_scratch;
  Register Roffset   = G4_scratch;
  Register Rflags    = G4_scratch;
  Register Rreceiver = Lscratch;

  __ ld_ptr(Llocals, 0, Rreceiver);

  // access constant pool cache (is resolved)
  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
  __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
  __ add(Lbcp, 1, Lbcp);  // needed to report exception at the correct bcp

  __ verify_oop(Rreceiver);
  __ null_check(Rreceiver);
  if (state == atos) {
    __ load_heap_oop(Rreceiver, Roffset, Otos_i);
  } else if (state == itos) {
    __ ld(Rreceiver, Roffset, Otos_i);
  } else if (state == ftos) {
    __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
  } else {
    ShouldNotReachHere();
  }

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  if (__ membar_has_effect(membar_bits)) {

    // Get is_volatile value in Rflags and check if membar is needed
    __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);

    // Test volatile
    Label notVolatile;
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ btst(Rflags, Lscratch);
    __ br(Assembler::zero, false, Assembler::pt, notVolatile);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
    __ bind(notVolatile);
  }

  __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  __ sub(Lbcp, 1, Lbcp);
}
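
// Rewriting note (informal): fast_xaccess implements the _fast_Xaccess_0
// bytecodes, which the rewriter produces for an aload_0 immediately followed
// by a rewritten getfield.  The fused bytecode keeps the getfield's cp-cache
// index at bcp + 2 (hence get_cache_and_index_at_bcp(..., 2)), and Lbcp is
// temporarily bumped by 1 so any NullPointerException is reported against the
// bcp of the embedded getfield.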

//----------------------------------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}

void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register ra,      // return address
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (recv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(recv  == noreg || recv  == O0, "");
  assert(flags == noreg || flags == O1, "");

  // setup registers & access constant pool cache
  if (recv  == noreg)  recv  = O0;
  if (flags == noreg)  flags = O1;
  const Register temp = O2;
  assert_different_registers(method, ra, index, recv, flags, temp);

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // maybe push appendix to arguments
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
    __ btst(flags, temp);
    __ br(Assembler::zero, false, Assembler::pt, L_no_push);
    __ delayed()->nop();
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
    __ load_resolved_reference_at_index(temp, index);
    __ verify_oop(temp);
    __ push_ptr(temp);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (after appendix is pushed so parameter size is correct)
  if (load_receiver) {
    __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp);  // get parameter size
    __ load_receiver(temp, recv);  // __ argument_address uses Gargs but we need Lesp
    __ verify_oop(recv);
  }

  // compute return type
  __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    AddressLiteral table(table_addr);
    __ set(table, temp);
    __ sll(ra, LogBytesPerWord, ra);
    __ ld_ptr(Address(temp, ra), ra);
  }
}

void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
  Register Rcall = Rindex;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  // get target Method* & entry point
  __ lookup_virtual_method(Rrecv, Rindex, G5_method);
  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ call_from_interpreter(Rcall, Gargs, Rret);
}

void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp    = G4_scratch;
  Register Rret     = Lscratch;
  Register O0_recv  = O0;
  Label notFinal;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
  __ btst(Rret, G4_scratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->and3(Rret, 0xFF, G4_scratch);  // gets number of parameters

  patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);

  invokevfinal_helper(Rscratch, Rret);

  __ bind(notFinal);

  __ mov(G5_method, Rscratch);  // better scratch register
  __ load_receiver(G4_scratch, O0_recv);  // gets receiverOop
  // receiver is in O0_recv
  __ verify_oop(O0_recv);

  // get return address
  AddressLiteral table(Interpreter::invoke_return_entry_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address

  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O0_recv);
  __ verify_klass_ptr(O0_recv);

  __ profile_virtual_call(O0_recv, O4);

  generate_vtable_call(O0_recv, Rscratch, Rret);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
                             /*is_invokevfinal*/true, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
  invokevfinal_helper(G3_scratch, Lscratch);
}
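
// Rewriting note (informal): once invokevirtual resolves a call site to a
// final method, the is_vfinal path above patches the bytecode to
// _fast_invokevfinal, so subsequent executions enter fast_invokevfinal and
// take the Method* straight from f2, skipping the vtable lookup entirely.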

void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
  Register Rtemp = G4_scratch;

  // Load receiver from stack slot
  __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
  __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
  __ load_receiver(G4_scratch, O0);

  // receiver NULL check
  __ null_check(O0);

  __ profile_final_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);

  // get return address
  AddressLiteral table(Interpreter::invoke_return_entry_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address


  // do the call
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv);  // get receiver also for null check
  __ null_check(O0_recv);

  // do the call
  __ profile_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret);  // get f1 Method*

  // do the call
  __ profile_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}

void TemplateTable::invokeinterface_object_method(Register RKlass,
                                                  Register Rcall,
                                                  Register Rret,
                                                  Register Rflags) {
  Register Rscratch = G4_scratch;
  Register Rindex   = Lscratch;

  assert_different_registers(Rscratch, Rindex, Rret);

  Label notFinal;

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
  __ btst(Rflags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->nop();

  __ profile_final_call(O4);

  // do the call - the index (f2) contains the Method*
  assert_different_registers(G5_method, Gargs, Rcall);
  __ mov(Rindex, G5_method);
  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ call_from_interpreter(Rcall, Gargs, Rret);
  __ bind(notFinal);

  __ profile_virtual_call(RKlass, O4);
  generate_vtable_call(RKlass, Rindex, Rret);
}

void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rinterface = G1_scratch;
  const Register Rret       = G3_scratch;
  const Register Rindex     = Lscratch;
  const Register O0_recv    = O0;
  const Register O1_flags   = O1;
  const Register O2_Klass   = O2;
  const Register Rscratch   = G4_scratch;
  assert_different_registers(Rscratch, G5_method);

  prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);

  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O2_Klass);

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCacheOop.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant java compiler.
  Label notMethod;
  __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
  __ btst(O1_flags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notMethod);
  __ delayed()->nop();

  invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);

  __ bind(notMethod);

  __ profile_virtual_call(O2_Klass, O4);

  //
  // find entry point to call
  //

  // compute start of first itableOffsetEntry (which is at the end of the vtable)
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  Label search;
  Register Rtemp = O1_flags;

  __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp);
  if (align_object_offset(1) > 1) {
    __ round_to(Rtemp, align_object_offset(1));
  }
  __ sll(Rtemp, LogBytesPerWord, Rtemp);  // Rtemp *= wordSize
  if (Assembler::is_simm13(base)) {
    __ add(Rtemp, base, Rtemp);
  } else {
    __ set(base, Rscratch);
    __ add(Rscratch, Rtemp, Rtemp);
  }
  __ add(O2_Klass, Rtemp, Rscratch);

  __ bind(search);

  __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
  {
    Label ok;

    // Check that entry is non-null.  Null entries are probably a bytecode
    // problem.  If the interface isn't implemented by the receiver class,
    // the VM should throw IncompatibleClassChangeError.  linkResolver checks
    // this too but that's only if the entry isn't already resolved, so we
    // need to check again.
    __ br_notnull_short(Rtemp, Assembler::pt, ok);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
    __ should_not_reach_here();
    __ bind(ok);
  }

  __ cmp(Rinterface, Rtemp);
  __ brx(Assembler::notEqual, true, Assembler::pn, search);
  __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);

  // entry found and Rscratch points to it
  __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);

  assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
  __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex);  // Rindex *= 8
  __ add(Rscratch, Rindex, Rscratch);
  __ ld_ptr(O2_Klass, Rscratch, G5_method);

  // Check for abstract method error.
  {
    Label ok;
    __ br_notnull_short(G5_method, Assembler::pt, ok);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
    __ should_not_reach_here();
    __ bind(ok);
  }

  Register Rcall = Rinterface;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ call_from_interpreter(Rcall, Gargs, Rret);
}
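
// Itable layout sketch (informal; see klassVtable.hpp for the authoritative
// definitions):
//
//   vtable                                                  // vtable_length words
//   itableOffsetEntry[] { Klass* interface; int offset; }   // NULL-terminated scan
//   itableMethodEntry[] { Method* method; }                 // one block per interface,
//                                                           // located via 'offset'
//
// The search loop above walks the offset entries until it matches Rinterface,
// then indexes the corresponding method block with Rindex.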

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  if (!EnableInvokeDynamic) {
    // rewriter does not generate this bytecode
    __ should_not_reach_here();
    return;
  }

  const Register Rret     = Lscratch;
  const Register G4_mtype = G4_scratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
  __ null_check(O0_recv);

  // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
  // G5: MH.invokeExact_MT method (from f2)

  // Note:  G4_mtype is already pushed (if necessary) by prepare_invoke

  // do the call
  __ verify_oop(G4_mtype);
  __ profile_final_call(O4);  // FIXME: profile the LambdaForm also
  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
    // The verifier will stop it.  However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                      InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  const Register Rret        = Lscratch;
  const Register G4_callsite = G4_scratch;
  const Register Rscratch    = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_callsite);

  // G4: CallSite object (from cpool->resolved_references[f1])
  // G5: MH.linkToCallSite method (from f2)

  // Note:  G4_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(O4);

  // do the call
  __ verify_oop(G4_callsite);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}

//----------------------------------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);

  Label slow_case;
  Label done;
  Label initialize_header;
  Label initialize_object;  // including clearing the fields

  Register RallocatedObject = Otos_i;
  Register RinstanceKlass = O1;
  Register Roffset = O3;
  Register Rscratch = O4;

  __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(Rscratch, G3_scratch);
  // make sure the class we're about to instantiate has been resolved
  // This is done before loading InstanceKlass to be consistent with the order
  // in which the Constant Pool is updated (see ConstantPool::klass_at_put)
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
  // get InstanceKlass
  //__ sll(Roffset, LogBytesPerWord, Roffset);        // executed in delay slot
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ ld_ptr(Rscratch, Roffset, RinstanceKlass);

  // make sure klass is fully initialized:
  __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
  __ cmp(G3_scratch, InstanceKlass::fully_initialized);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // get instance_size in InstanceKlass (already aligned)
  //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // make sure klass does not have a finalizer, and is not abstract, an interface, or java/lang/Class
  __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
  __ br(Assembler::notZero, false, Assembler::pn, slow_case);
  __ delayed()->nop();

  // allocate the instance
  // 1) Try to allocate in the TLAB
  // 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden
  // 3) if the above fails (or is not applicable), go to a slow case
  //    (creates a new TLAB, etc.)
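  //
  // In outline, the fast path that follows behaves like this illustrative
  // C++ (names are descriptive only, not the real accessors):
  //
  //   HeapWord* top     = thread->tlab_top();
  //   HeapWord* new_top = top + size_in_words;
  //   if (new_top <= thread->tlab_end()) {
  //     thread->set_tlab_top(new_top);               // in-TLAB bump-pointer alloc
  //     obj = top;
  //   } else if (refill_waste_limit >= tlab_free) {
  //     goto slow_case;                              // ok to discard/refill TLAB
  //   } else {
  //     refill_waste_limit += increment;             // keep TLAB for now and
  //     obj = cas_bump_shared_eden_top(size);        // CAS on eden top, retry loop
  //   }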

  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;

  if (UseTLAB) {
    Register RoldTopValue = RallocatedObject;
    Register RtlabWasteLimitValue = G3_scratch;
    Register RnewTopValue = G1_scratch;
    Register RendValue = Rscratch;
    Register RfreeValue = RnewTopValue;

    // check if we can allocate in the TLAB
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue);  // sets up RallocatedObject
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
    __ add(RoldTopValue, Roffset, RnewTopValue);

    // if there is enough space, we do not CAS and do not clear
    __ cmp(RnewTopValue, RendValue);
    if (ZeroTLAB) {
      // the fields have already been cleared
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
    } else {
      // initialize both the header and fields
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
    }
    __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));

    if (allow_shared_alloc) {
      // Check if tlab should be discarded (refill_waste_limit >= free)
      __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
      __ sub(RendValue, RoldTopValue, RfreeValue);
#ifdef _LP64
      __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
#else
      __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
#endif
      __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case);  // tlab waste is small

      // increment waste limit to prevent getting stuck on this slow path
      __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
      __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
    } else {
      // No allocation in the shared eden.
      __ ba_short(slow_case);
    }
  }

  // Allocation in the shared Eden
  if (allow_shared_alloc) {
    Register RoldTopValue = G1_scratch;
    Register RtopAddr = G3_scratch;
    Register RnewTopValue = RallocatedObject;
    Register RendValue = Rscratch;

    __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);

    Label retry;
    __ bind(retry);
    __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
    __ ld_ptr(RendValue, 0, RendValue);
    __ ld_ptr(RtopAddr, 0, RoldTopValue);
    __ add(RoldTopValue, Roffset, RnewTopValue);

    // RnewTopValue contains the top address after the new object
    // has been allocated.
    __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);

    __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);

    // if someone beat us on the allocation, try again, otherwise continue
    __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);

    // bump total bytes allocated by this thread
    // RoldTopValue and RtopAddr are dead, so can use G1 and G3
    __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
  }
    __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);

    __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);

    // if someone beat us to the allocation, try again, otherwise continue
    __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);

    // bump the total bytes allocated by this thread
    // RoldTopValue and RtopAddr are dead, so we can use G1 and G3
    __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
  }

  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
    // clear object fields
    __ bind(initialize_object);
    __ deccc(Roffset, sizeof(oopDesc));
    __ br(Assembler::zero, false, Assembler::pt, initialize_header);
    __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);

    // initialize remaining object fields
    if (UseBlockZeroing) {
      // Use BIS for zeroing
      __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
    } else {
      Label loop;
      __ subcc(Roffset, wordSize, Roffset);
      __ bind(loop);
      //__ subcc(Roffset, wordSize, Roffset);      // executed above loop or in delay slot
      __ st_ptr(G0, G3_scratch, Roffset);
      __ br(Assembler::notEqual, false, Assembler::pt, loop);
      __ delayed()->subcc(Roffset, wordSize, Roffset);
    }
    __ ba_short(initialize_header);
  }

  // slow case
  __ bind(slow_case);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
  __ get_constant_pool(O1);

  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);

  __ ba_short(done);

  // Initialize the header: mark word, klass
  __ bind(initialize_header);

  if (UseBiasedLocking) {
    __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
  } else {
    __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
  }
  __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes());       // mark
  __ store_klass_gap(G0, RallocatedObject);         // klass gap if compressed
  __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for CMS)

  {
    SkipIfEqual skip_if(
      _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
    // Trigger dtrace event
    __ push(atos);
    __ call_VM_leaf(noreg,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
    __ pop(atos);
  }

  // continue
  __ bind(done);
}



void TemplateTable::newarray() {
  transition(itos, atos);
  __ ldub(Lbcp, 1, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
}


void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_constant_pool(O1);
  __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
}


void TemplateTable::arraylength() {
  transition(atos, itos);
  Label ok;
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_1_x( Assembler::notZero, ok );
  __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
  __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
}


void TemplateTable::checkcast() {
  transition(atos, atos);
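  // Fast path: if the constant pool entry is already quickened (tagged
  // JVM_CONSTANT_Class), load the target klass straight from the pool and run
  // the generated subtype check; otherwise call into the VM (quicken_io_cc)
  // to resolve it first. A NULL operand always passes the cast.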
  Label done, is_null, quicked, cast_ok, resolved;
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for casting a NULL
  __ br_null_short(Otos_i, Assembler::pn, is_null);

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the checkcast has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check.  Branch to cast_ok if no
  // failure.  Throw an exception on failure.
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );

  // Not a subtype; so must throw an exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );

  __ bind(cast_ok);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}


void TemplateTable::instanceof() {
  Label done, is_null, quicked, resolved;
  transition(atos, itos);
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for a NULL operand
  __ br_null_short(Otos_i, Assembler::pt, is_null);

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the instanceof has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ get_constant_pool(Lscratch);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check.  Branch to done if no
  // failure.  Return 0 on failure.
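  //
  // gen_subtype_check branches to the given label (here: done) when RobjKlass
  // is a subtype of RspecifiedKlass and falls through on failure, so the
  // result register is preset to 1 and cleared again on the fall-through path.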
  __ or3(G0, 1, Otos_i);      // set result assuming quick tests succeed
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
  // Not a subtype; return 0
  __ clr( Otos_i );

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}

void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);
  // get the unpatched byte code
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
  __ mov(O0, Lbyte_code);

  // post the breakpoint event
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);

  // complete the execution of the original bytecode
  __ dispatch_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // This works because the exception is cached in Otos_i, which is the same as O0,
  // which is what throw_exception_entry expects.
  assert(Otos_i == Oexception, "see explanation above");

  __ verify_oop(Otos_i);
  __ null_check(Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
}


//----------------------------------------------------------------------------------------------------
// Synchronization


// See frame_sparc.hpp for the monitor block layout.
// Monitor elements are allocated dynamically by growing the stack as needed.

void TemplateTable::monitorenter() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until monitorenter returns true).

  { Label ok;
    __ tst(Otos_i);
    __ throw_if_not_1_x( Assembler::notZero, ok);
    __ delayed()->mov(Otos_i, Lscratch); // save obj
    __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  }

  assert(O0 == Otos_i, "Be sure where the object to lock is");

  // find a free slot in the monitor block

  // initialize entry pointer
  __ clr(O1); // points to free slot or NULL

  {
    Label entry, loop, exit;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba( entry );
    __ delayed()->mov( Lmonitors, O3 ); // first one to check


    __ bind( loop );

    __ verify_oop(O4);                  // verify each monitor's oop
    __ tst(O4);                         // is this entry unused?
    __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);

    __ cmp(O4, O0);                     // check if current entry is for same object
    __ brx( Assembler::equal, false, Assembler::pn, exit );
    __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one

    __ bind( entry );

    __ cmp( O3, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);

    __ bind( exit );
  }

  { Label allocated;

    // found a free slot?
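    // O1 now points at an unused monitor slot (set by the movcc in the scan
    // above), or is still NULL if every slot is in use; in the latter case
    // grow the monitor block by one entry.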
    __ br_notnull_short(O1, Assembler::pn, allocated);

    __ add_monitor_to_stack( false, O2, O3 );
    __ mov(Lmonitors, O1);

    __ bind(allocated);
  }

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ inc(Lbcp);

  __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
  __ lock_object(O1, O0);

  // check if there's enough space on the stack for the monitors after locking
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to the next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );

  assert(O0 == Otos_i, "just checking");

  { Label entry, loop, found;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba(entry);
    // Use Lscratch to hold the monitor element to check, starting with the most recent monitor.
    // By using a local it survives the call to the C routine.
    __ delayed()->mov( Lmonitors, Lscratch );

    __ bind( loop );

    __ verify_oop(O4);                  // verify each monitor's oop
    __ cmp(O4, O0);                     // check if current entry is for desired object
    __ brx( Assembler::equal, true, Assembler::pt, found );
    __ delayed()->mov(Lscratch, O1);    // pass found entry as argument to monitorexit

    __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next

    __ bind( entry );

    __ cmp( Lscratch, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);

    // fell through the scan without a match: the object is not locked here
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    __ should_not_reach_here();

    __ bind(found);
  }
  __ unlock_object(O1);
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldub(Lbcp, 1, G3_scratch); // get next bc
  __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
  AddressLiteral ep(Interpreter::_wentry_point);
  __ set(ep, G4_scratch);
  __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
  __ jmp(G3_scratch, G0);
  __ delayed()->nop();
  // Note: the Lbcp increment step is part of the individual wide bytecode implementations
}


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  // put ndims * wordSize into Lscratch
  __ ldub( Lbcp, 3, Lscratch);
  __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
  // Lesp points past the last_dim, so set O1 to the first_dim address
  __ add( Lesp, Lscratch, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
  __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
}
#endif /* !CC_INTERP */